Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (49 commits)
  xfs: simplify and speed up direct I/O completions
  xfs: move aio completion after unwritten extent conversion
  direct-io: move aio_complete into ->end_io
  xfs: fix big endian build
  xfs: clean up xfs_bmap_get_bp
  xfs: simplify xfs_truncate_file
  xfs: kill the b_strat callback in xfs_buf
  xfs: remove obsolete osyncisosync mount option
  xfs: clean up filestreams helpers
  xfs: fix gcc 4.6 set but not read and unused statement warnings
  xfs: Fix build when CONFIG_XFS_POSIX_ACL=n
  xfs: fix unsigned underflow in xfs_free_eofblocks
  xfs: use GFP_NOFS for page cache allocation
  xfs: fix memory reclaim recursion deadlock on locked inode buffer
  xfs: fix xfs_trans_add_item() lockdep warnings
  xfs: simplify and remove xfs_ireclaim
  xfs: don't block on buffer read errors
  xfs: move inode shrinker unregister even earlier
  xfs: remove a dmapi leftover
  xfs: writepage always has buffers
  ...
Linus Torvalds 2010-08-03 14:33:38 -07:00
commit 51102ee5b8
91 changed files with 1616 additions and 4013 deletions

Documentation/filesystems/xfs.txt

@ -131,17 +131,6 @@ When mounting an XFS filesystem, the following options are accepted.
Don't check for double mounted file systems using the file system uuid.
This is useful to mount LVM snapshot volumes.
osyncisosync
Make O_SYNC writes implement true O_SYNC. WITHOUT this option,
Linux XFS behaves as if an "osyncisdsync" option is used,
which will make writes to files opened with the O_SYNC flag set
behave as if the O_DSYNC flag had been used instead.
This can result in better performance without compromising
data safety.
However if this option is not in effect, timestamp updates from
O_SYNC writes can be lost if the system crashes.
If timestamp updates are critical, use the osyncisosync option.
uquota/usrquota/uqnoenforce/quota
User disk quota accounting enabled, and limits (optionally)
enforced. Refer to xfs_quota(8) for further details.

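The option is obsolete now that the generic VFS distinguishes O_SYNC from O_DSYNC, so O_SYNC on XFS always gets true O_SYNC semantics. For readers unfamiliar with the distinction the removed text describes, a small userspace illustration (not part of this commit) of the two flags:

/*
 * Illustration only: the semantic difference the removed mount option
 * used to control.  O_DSYNC waits for the file data (plus any metadata
 * needed to read it back); O_SYNC additionally waits for pure metadata
 * such as timestamps.
 */
#include <fcntl.h>
#include <unistd.h>

ssize_t write_durable(const char *path, const void *buf, size_t len)
{
        int fd = open(path, O_WRONLY | O_CREAT | O_SYNC, 0644);
        ssize_t ret;

        if (fd < 0)
                return -1;
        ret = write(fd, buf, len);      /* returns only once stable */
        close(fd);
        return ret;
}
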
fs/direct-io.c

@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
static int dio_complete(struct dio *dio, loff_t offset, int ret)
static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
{
ssize_t transferred = 0;
@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
transferred = dio->i_size - offset;
}
if (dio->end_io && dio->result)
dio->end_io(dio->iocb, offset, transferred,
dio->map_bh.b_private);
if (dio->flags & DIO_LOCKING)
/* lockdep: non-owner release */
up_read_non_owner(&dio->inode->i_alloc_sem);
if (ret == 0)
ret = dio->page_errors;
if (ret == 0)
@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
if (ret == 0)
ret = transferred;
if (dio->end_io && dio->result) {
dio->end_io(dio->iocb, offset, transferred,
dio->map_bh.b_private, ret, is_async);
} else if (is_async) {
aio_complete(dio->iocb, ret, 0);
}
if (dio->flags & DIO_LOCKING)
/* lockdep: non-owner release */
up_read_non_owner(&dio->inode->i_alloc_sem);
return ret;
}
@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (remaining == 0) {
int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
aio_complete(dio->iocb, ret, 0);
dio_complete(dio, dio->iocb->ki_pos, 0, true);
kfree(dio);
}
}
@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (ret2 == 0) {
ret = dio_complete(dio, offset, ret);
ret = dio_complete(dio, offset, ret, false);
kfree(dio);
} else
BUG_ON(ret != -EIOCBQUEUED);

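The fs/direct-io.c hunks above establish the new ->end_io contract the rest of this series builds on: the callback now receives the request's return value and an is_async flag, and when it is invoked it, not the generic code, is responsible for calling aio_complete() on async requests. A minimal sketch of a conforming handler -- the myfs_* name is hypothetical, but the shape matches the ext4, ocfs2 and XFS conversions below:

/* Hypothetical filesystem's ->end_io under the new contract. */
static void myfs_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private,
                            int ret, bool is_async)
{
        /* ... filesystem-specific completion work goes here ... */

        /*
         * Async requests must be completed by the callback itself;
         * synchronous callers wait in direct_io_worker() instead.
         */
        if (is_async)
                aio_complete(iocb, ret, 0);
}
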
fs/ext4/inode.c

@ -3775,7 +3775,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private)
ssize_t size, void *private, int ret,
bool is_async)
{
ext4_io_end_t *io_end = iocb->private;
struct workqueue_struct *wq;
@ -3784,7 +3785,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
/* if not async direct IO or dio with 0 bytes write, just return */
if (!io_end || !size)
return;
goto out;
ext_debug("ext4_end_io_dio(): io_end 0x%p"
"for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
@ -3795,7 +3796,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
if (io_end->flag != EXT4_IO_UNWRITTEN){
ext4_free_io_end(io_end);
iocb->private = NULL;
return;
goto out;
}
io_end->offset = offset;
@ -3812,6 +3813,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
list_add_tail(&io_end->list, &ei->i_completed_io_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
iocb->private = NULL;
out:
if (is_async)
aio_complete(iocb, ret, 0);
}
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)

fs/ocfs2/aops.c

@ -578,7 +578,9 @@ bail:
static void ocfs2_dio_end_io(struct kiocb *iocb,
loff_t offset,
ssize_t bytes,
void *private)
void *private,
int ret,
bool is_async)
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
int level;
@ -592,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
if (!level)
up_read(&inode->i_alloc_sem);
ocfs2_rw_unlock(inode, level);
if (is_async)
aio_complete(iocb, ret, 0);
}
/*

fs/xfs/Makefile

@ -87,11 +87,9 @@ xfs-y += xfs_alloc.o \
xfs_trans_buf.o \
xfs_trans_extfree.o \
xfs_trans_inode.o \
xfs_trans_item.o \
xfs_utils.o \
xfs_vnodeops.o \
xfs_rw.o \
xfs_dmops.o
xfs_rw.o
xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o

fs/xfs/linux-2.6/xfs_acl.c

@ -225,7 +225,7 @@ xfs_check_acl(struct inode *inode, int mask)
struct posix_acl *acl;
int error = -EAGAIN;
xfs_itrace_entry(ip);
trace_xfs_check_acl(ip);
/*
* If there is no attribute fork no ACL exists on this inode and

fs/xfs/linux-2.6/xfs_aops.c

@ -21,19 +21,12 @@
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
@ -92,18 +85,15 @@ void
xfs_count_page_state(
struct page *page,
int *delalloc,
int *unmapped,
int *unwritten)
{
struct buffer_head *bh, *head;
*delalloc = *unmapped = *unwritten = 0;
*delalloc = *unwritten = 0;
bh = head = page_buffers(page);
do {
if (buffer_uptodate(bh) && !buffer_mapped(bh))
(*unmapped) = 1;
else if (buffer_unwritten(bh))
if (buffer_unwritten(bh))
(*unwritten) = 1;
else if (buffer_delay(bh))
(*delalloc) = 1;
@ -212,23 +202,17 @@ xfs_setfilesize(
}
/*
* Schedule IO completion handling on a xfsdatad if this was
* the final hold on this ioend. If we are asked to wait,
* flush the workqueue.
* Schedule IO completion handling on the final put of an ioend.
*/
STATIC void
xfs_finish_ioend(
xfs_ioend_t *ioend,
int wait)
struct xfs_ioend *ioend)
{
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct workqueue_struct *wq;
wq = (ioend->io_type == IO_UNWRITTEN) ?
xfsconvertd_workqueue : xfsdatad_workqueue;
queue_work(wq, &ioend->io_work);
if (wait)
flush_workqueue(wq);
if (ioend->io_type == IO_UNWRITTEN)
queue_work(xfsconvertd_workqueue, &ioend->io_work);
else
queue_work(xfsdatad_workqueue, &ioend->io_work);
}
}
@ -272,11 +256,25 @@ xfs_end_io(
*/
if (error == EAGAIN) {
atomic_inc(&ioend->io_remaining);
xfs_finish_ioend(ioend, 0);
xfs_finish_ioend(ioend);
/* ensure we don't spin on blocked ioends */
delay(1);
} else
} else {
if (ioend->io_iocb)
aio_complete(ioend->io_iocb, ioend->io_result, 0);
xfs_destroy_ioend(ioend);
}
}
/*
* Call IO completion handling in caller context on the final put of an ioend.
*/
STATIC void
xfs_finish_ioend_sync(
struct xfs_ioend *ioend)
{
if (atomic_dec_and_test(&ioend->io_remaining))
xfs_end_io(&ioend->io_work);
}
/*
@ -309,6 +307,8 @@ xfs_alloc_ioend(
atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
ioend->io_offset = 0;
ioend->io_size = 0;
ioend->io_iocb = NULL;
ioend->io_result = 0;
INIT_WORK(&ioend->io_work, xfs_end_io);
return ioend;
@ -358,7 +358,7 @@ xfs_end_bio(
bio->bi_end_io = NULL;
bio_put(bio);
xfs_finish_ioend(ioend, 0);
xfs_finish_ioend(ioend);
}
STATIC void
@ -500,7 +500,7 @@ xfs_submit_ioend(
}
if (bio)
xfs_submit_ioend_bio(wbc, ioend, bio);
xfs_finish_ioend(ioend, 0);
xfs_finish_ioend(ioend);
} while ((ioend = next) != NULL);
}
@ -614,31 +614,30 @@ xfs_map_at_offset(
STATIC unsigned int
xfs_probe_page(
struct page *page,
unsigned int pg_offset,
int mapped)
unsigned int pg_offset)
{
struct buffer_head *bh, *head;
int ret = 0;
if (PageWriteback(page))
return 0;
if (!PageDirty(page))
return 0;
if (!page->mapping)
return 0;
if (!page_has_buffers(page))
return 0;
if (page->mapping && PageDirty(page)) {
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
bh = head = page_buffers(page);
do {
if (!buffer_uptodate(bh))
break;
if (mapped != buffer_mapped(bh))
break;
ret += bh->b_size;
if (ret >= pg_offset)
break;
} while ((bh = bh->b_this_page) != head);
} else
ret = mapped ? 0 : PAGE_CACHE_SIZE;
}
bh = head = page_buffers(page);
do {
if (!buffer_uptodate(bh))
break;
if (!buffer_mapped(bh))
break;
ret += bh->b_size;
if (ret >= pg_offset)
break;
} while ((bh = bh->b_this_page) != head);
return ret;
}
@ -648,8 +647,7 @@ xfs_probe_cluster(
struct inode *inode,
struct page *startpage,
struct buffer_head *bh,
struct buffer_head *head,
int mapped)
struct buffer_head *head)
{
struct pagevec pvec;
pgoff_t tindex, tlast, tloff;
@ -658,7 +656,7 @@ xfs_probe_cluster(
/* First sum forwards in this page */
do {
if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
if (!buffer_uptodate(bh) || !buffer_mapped(bh))
return total;
total += bh->b_size;
} while ((bh = bh->b_this_page) != head);
@ -692,7 +690,7 @@ xfs_probe_cluster(
pg_offset = PAGE_CACHE_SIZE;
if (page->index == tindex && trylock_page(page)) {
pg_len = xfs_probe_page(page, pg_offset, mapped);
pg_len = xfs_probe_page(page, pg_offset);
unlock_page(page);
}
@ -761,7 +759,6 @@ xfs_convert_page(
struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
int all_bh)
{
struct buffer_head *bh, *head;
@ -832,19 +829,14 @@ xfs_convert_page(
ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
xfs_map_at_offset(inode, bh, imap, offset);
if (startio) {
xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
} else {
set_buffer_dirty(bh);
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
xfs_add_to_ioend(inode, bh, offset, type,
ioendp, done);
page_dirty--;
count++;
} else {
type = IO_NEW;
if (buffer_mapped(bh) && all_bh && startio) {
if (buffer_mapped(bh) && all_bh) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
@ -859,14 +851,12 @@ xfs_convert_page(
if (uptodate && bh == head)
SetPageUptodate(page);
if (startio) {
if (count) {
wbc->nr_to_write--;
if (wbc->nr_to_write <= 0)
done = 1;
}
xfs_start_page_writeback(page, !page_dirty, count);
if (count) {
wbc->nr_to_write--;
if (wbc->nr_to_write <= 0)
done = 1;
}
xfs_start_page_writeback(page, !page_dirty, count);
return done;
fail_unlock_page:
@ -886,7 +876,6 @@ xfs_cluster_write(
struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
int all_bh,
pgoff_t tlast)
{
@ -902,7 +891,7 @@ xfs_cluster_write(
for (i = 0; i < pagevec_count(&pvec); i++) {
done = xfs_convert_page(inode, pvec.pages[i], tindex++,
imap, ioendp, wbc, startio, all_bh);
imap, ioendp, wbc, all_bh);
if (done)
break;
}
@ -981,7 +970,7 @@ xfs_aops_discard_page(
*/
error = xfs_bmapi(NULL, ip, offset_fsb, 1,
XFS_BMAPI_ENTIRE, NULL, 0, &imap,
&nimaps, NULL, NULL);
&nimaps, NULL);
if (error) {
/* something screwed, just bail */
@ -1009,7 +998,7 @@ xfs_aops_discard_page(
*/
xfs_bmap_init(&flist, &firstblock);
error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
&flist, NULL, &done);
&flist, &done);
ASSERT(!flist.xbf_count && !flist.xbf_first);
if (error) {
@ -1032,50 +1021,66 @@ out_invalidate:
}
/*
* Calling this without startio set means we are being asked to make a dirty
* page ready for freeing it's buffers. When called with startio set then
* we are coming from writepage.
* Write out a dirty page.
*
* When called with startio set it is important that we write the WHOLE
* page if possible.
* The bh->b_state's cannot know if any of the blocks or which block for
* that matter are dirty due to mmap writes, and therefore bh uptodate is
* only valid if the page itself isn't completely uptodate. Some layers
* may clear the page dirty flag prior to calling write page, under the
* assumption the entire page will be written out; by not writing out the
* whole page the page can be reused before all valid dirty data is
* written out. Note: in the case of a page that has been dirty'd by
* mapwrite and but partially setup by block_prepare_write the
* bh->b_states's will not agree and only ones setup by BPW/BCW will have
* valid state, thus the whole page must be written out thing.
* For delalloc space on the page we need to allocate space and flush it.
* For unwritten space on the page we need to start the conversion to
* regular allocated space.
* For any other dirty buffer heads on the page we should flush them.
*
* If we detect that a transaction would be required to flush the page, we
* have to check the process flags first, if we are already in a transaction
* or disk I/O during allocations is off, we need to fail the writepage and
* redirty the page.
*/
STATIC int
xfs_page_state_convert(
struct inode *inode,
struct page *page,
struct writeback_control *wbc,
int startio,
int unmapped) /* also implies page uptodate */
xfs_vm_writepage(
struct page *page,
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
int delalloc, unwritten;
struct buffer_head *bh, *head;
struct xfs_bmbt_irec imap;
xfs_ioend_t *ioend = NULL, *iohead = NULL;
loff_t offset;
unsigned long p_offset = 0;
unsigned int type;
__uint64_t end_offset;
pgoff_t end_index, last_index;
ssize_t size, len;
int flags, err, imap_valid = 0, uptodate = 1;
int page_dirty, count = 0;
int trylock = 0;
int all_bh = unmapped;
int count = 0;
int all_bh = 0;
if (startio) {
if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
trylock |= BMAPI_TRYLOCK;
}
trace_xfs_writepage(inode, page, 0);
ASSERT(page_has_buffers(page));
/*
* Refuse to write the page out if we are called from reclaim context.
*
* This avoids stack overflows when called from deeply used stacks in
* random callers for direct reclaim or memcg reclaim. We explicitly
* allow reclaim from kswapd as the stack usage there is relatively low.
*
* This should really be done by the core VM, but until that happens
* filesystems like XFS, btrfs and ext4 have to take care of this
* by themselves.
*/
if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
goto out_fail;
/*
* We need a transaction if there are delalloc or unwritten buffers
* on the page.
*
* If we need a transaction and the process flags say we are already
* in a transaction, or no IO is allowed then mark the page dirty
* again and leave the page as is.
*/
xfs_count_page_state(page, &delalloc, &unwritten);
if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
goto out_fail;
/* Is this page beyond the end of the file? */
offset = i_size_read(inode);
@ -1084,50 +1089,33 @@ xfs_page_state_convert(
if (page->index >= end_index) {
if ((page->index >= end_index + 1) ||
!(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
if (startio)
unlock_page(page);
unlock_page(page);
return 0;
}
}
/*
* page_dirty is initially a count of buffers on the page before
* EOF and is decremented as we move each into a cleanable state.
*
* Derivation:
*
* End offset is the highest offset that this page should represent.
* If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
* will evaluate non-zero and be less than PAGE_CACHE_SIZE and
* hence give us the correct page_dirty count. On any other page,
* it will be zero and in that case we need page_dirty to be the
* count of buffers on the page.
*/
end_offset = min_t(unsigned long long,
(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
offset);
len = 1 << inode->i_blkbits;
p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
PAGE_CACHE_SIZE);
p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
page_dirty = p_offset / len;
bh = head = page_buffers(page);
offset = page_offset(page);
flags = BMAPI_READ;
type = IO_NEW;
/* TODO: cleanup count and page_dirty */
do {
if (offset >= end_offset)
break;
if (!buffer_uptodate(bh))
uptodate = 0;
if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
/*
* the iomap is actually still valid, but the ioend
* isn't. shouldn't happen too often.
*/
/*
* A hole may still be marked uptodate because discard_buffer
* leaves the flag set.
*/
if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
ASSERT(!buffer_dirty(bh));
imap_valid = 0;
continue;
}
@ -1135,19 +1123,7 @@ xfs_page_state_convert(
if (imap_valid)
imap_valid = xfs_imap_valid(inode, &imap, offset);
/*
* First case, map an unwritten extent and prepare for
* extent state conversion transaction on completion.
*
* Second case, allocate space for a delalloc buffer.
* We can return EAGAIN here in the release page case.
*
* Third case, an unmapped buffer was found, and we are
* in a path where we need to write the whole page out.
*/
if (buffer_unwritten(bh) || buffer_delay(bh) ||
((buffer_uptodate(bh) || PageUptodate(page)) &&
!buffer_mapped(bh) && (unmapped || startio))) {
if (buffer_unwritten(bh) || buffer_delay(bh)) {
int new_ioend = 0;
/*
@ -1161,15 +1137,16 @@ xfs_page_state_convert(
flags = BMAPI_WRITE | BMAPI_IGNSTATE;
} else if (buffer_delay(bh)) {
type = IO_DELAY;
flags = BMAPI_ALLOCATE | trylock;
} else {
type = IO_NEW;
flags = BMAPI_WRITE | BMAPI_MMAP;
flags = BMAPI_ALLOCATE;
if (wbc->sync_mode == WB_SYNC_NONE &&
wbc->nonblocking)
flags |= BMAPI_TRYLOCK;
}
if (!imap_valid) {
/*
* if we didn't have a valid mapping then we
* If we didn't have a valid mapping then we
* need to ensure that we put the new mapping
* in a new ioend structure. This needs to be
* done to ensure that the ioends correctly
@ -1177,14 +1154,7 @@ xfs_page_state_convert(
* for unwritten extent conversion.
*/
new_ioend = 1;
if (type == IO_NEW) {
size = xfs_probe_cluster(inode,
page, bh, head, 0);
} else {
size = len;
}
err = xfs_map_blocks(inode, offset, size,
err = xfs_map_blocks(inode, offset, len,
&imap, flags);
if (err)
goto error;
@ -1193,19 +1163,11 @@ xfs_page_state_convert(
}
if (imap_valid) {
xfs_map_at_offset(inode, bh, &imap, offset);
if (startio) {
xfs_add_to_ioend(inode, bh, offset,
type, &ioend,
new_ioend);
} else {
set_buffer_dirty(bh);
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
page_dirty--;
xfs_add_to_ioend(inode, bh, offset, type,
&ioend, new_ioend);
count++;
}
} else if (buffer_uptodate(bh) && startio) {
} else if (buffer_uptodate(bh)) {
/*
* we got here because the buffer is already mapped.
* That means it must already have extents allocated
@ -1213,8 +1175,7 @@ xfs_page_state_convert(
*/
if (!imap_valid || flags != BMAPI_READ) {
flags = BMAPI_READ;
size = xfs_probe_cluster(inode, page, bh,
head, 1);
size = xfs_probe_cluster(inode, page, bh, head);
err = xfs_map_blocks(inode, offset, size,
&imap, flags);
if (err)
@ -1233,18 +1194,16 @@ xfs_page_state_convert(
*/
type = IO_NEW;
if (trylock_buffer(bh)) {
ASSERT(buffer_mapped(bh));
if (imap_valid)
all_bh = 1;
xfs_add_to_ioend(inode, bh, offset, type,
&ioend, !imap_valid);
page_dirty--;
count++;
} else {
imap_valid = 0;
}
} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
(unmapped || startio)) {
} else if (PageUptodate(page)) {
ASSERT(buffer_mapped(bh));
imap_valid = 0;
}
@ -1256,8 +1215,7 @@ xfs_page_state_convert(
if (uptodate && bh == head)
SetPageUptodate(page);
if (startio)
xfs_start_page_writeback(page, 1, count);
xfs_start_page_writeback(page, 1, count);
if (ioend && imap_valid) {
xfs_off_t end_index;
@ -1275,131 +1233,27 @@ xfs_page_state_convert(
end_index = last_index;
xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
wbc, startio, all_bh, end_index);
wbc, all_bh, end_index);
}
if (iohead)
xfs_submit_ioend(wbc, iohead);
return page_dirty;
return 0;
error:
if (iohead)
xfs_cancel_ioend(iohead);
/*
* If it's delalloc and we have nowhere to put it,
* throw it away, unless the lower layers told
* us to try again.
*/
if (err != -EAGAIN) {
if (!unmapped)
xfs_aops_discard_page(page);
ClearPageUptodate(page);
}
xfs_aops_discard_page(page);
ClearPageUptodate(page);
unlock_page(page);
return err;
}
/*
* writepage: Called from one of two places:
*
* 1. we are flushing a delalloc buffer head.
*
* 2. we are writing out a dirty page. Typically the page dirty
* state is cleared before we get here. In this case is it
* conceivable we have no buffer heads.
*
* For delalloc space on the page we need to allocate space and
* flush it. For unmapped buffer heads on the page we should
* allocate space if the page is uptodate. For any other dirty
* buffer heads on the page we should flush them.
*
* If we detect that a transaction would be required to flush
* the page, we have to check the process flags first, if we
* are already in a transaction or disk I/O during allocations
* is off, we need to fail the writepage and redirty the page.
*/
STATIC int
xfs_vm_writepage(
struct page *page,
struct writeback_control *wbc)
{
int error;
int need_trans;
int delalloc, unmapped, unwritten;
struct inode *inode = page->mapping->host;
trace_xfs_writepage(inode, page, 0);
/*
* Refuse to write the page out if we are called from reclaim context.
*
* This is primarily to avoid stack overflows when called from deep
* used stacks in random callers for direct reclaim, but disabling
* reclaim for kswap is a nice side-effect as kswapd causes rather
* suboptimal I/O patters, too.
*
* This should really be done by the core VM, but until that happens
* filesystems like XFS, btrfs and ext4 have to take care of this
* by themselves.
*/
if (current->flags & PF_MEMALLOC)
goto out_fail;
/*
* We need a transaction if:
* 1. There are delalloc buffers on the page
* 2. The page is uptodate and we have unmapped buffers
* 3. The page is uptodate and we have no buffers
* 4. There are unwritten buffers on the page
*/
if (!page_has_buffers(page)) {
unmapped = 1;
need_trans = 1;
} else {
xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
if (!PageUptodate(page))
unmapped = 0;
need_trans = delalloc + unmapped + unwritten;
}
/*
* If we need a transaction and the process flags say
* we are already in a transaction, or no IO is allowed
* then mark the page dirty again and leave the page
* as is.
*/
if (current_test_flags(PF_FSTRANS) && need_trans)
goto out_fail;
/*
* Delay hooking up buffer heads until we have
* made our go/no-go decision.
*/
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << inode->i_blkbits, 0);
/*
* Convert delayed allocate, unwritten or unmapped space
* to real space and flush out to disk.
*/
error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
if (error == -EAGAIN)
goto out_fail;
if (unlikely(error < 0))
goto out_unlock;
return 0;
out_fail:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
out_unlock:
unlock_page(page);
return error;
}
STATIC int
@ -1413,65 +1267,27 @@ xfs_vm_writepages(
/*
* Called to move a page into cleanable state - and from there
* to be released. Possibly the page is already clean. We always
* to be released. The page should already be clean. We always
* have buffer heads in this call.
*
* Returns 0 if the page is ok to release, 1 otherwise.
*
* Possible scenarios are:
*
* 1. We are being called to release a page which has been written
* to via regular I/O. buffer heads will be dirty and possibly
* delalloc. If no delalloc buffer heads in this case then we
* can just return zero.
*
* 2. We are called to release a page which has been written via
* mmap, all we need to do is ensure there is no delalloc
* state in the buffer heads, if not we can let the caller
* free them and we should come back later via writepage.
* Returns 1 if the page is ok to release, 0 otherwise.
*/
STATIC int
xfs_vm_releasepage(
struct page *page,
gfp_t gfp_mask)
{
struct inode *inode = page->mapping->host;
int dirty, delalloc, unmapped, unwritten;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 1,
};
int delalloc, unwritten;
trace_xfs_releasepage(inode, page, 0);
trace_xfs_releasepage(page->mapping->host, page, 0);
if (!page_has_buffers(page))
xfs_count_page_state(page, &delalloc, &unwritten);
if (WARN_ON(delalloc))
return 0;
if (WARN_ON(unwritten))
return 0;
xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
if (!delalloc && !unwritten)
goto free_buffers;
if (!(gfp_mask & __GFP_FS))
return 0;
/* If we are already inside a transaction or the thread cannot
* do I/O, we cannot release this page.
*/
if (current_test_flags(PF_FSTRANS))
return 0;
/*
* Convert delalloc space to real space, do not flush the
* data out to disk, that will be done by the caller.
* Never need to allocate space here - we will always
* come back to writepage in that case.
*/
dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
if (dirty == 0 && !unwritten)
goto free_buffers;
return 0;
free_buffers:
return try_to_free_buffers(page);
}
@ -1481,9 +1297,9 @@ __xfs_get_blocks(
sector_t iblock,
struct buffer_head *bh_result,
int create,
int direct,
bmapi_flags_t flags)
int direct)
{
int flags = create ? BMAPI_WRITE : BMAPI_READ;
struct xfs_bmbt_irec imap;
xfs_off_t offset;
ssize_t size;
@ -1498,8 +1314,11 @@ __xfs_get_blocks(
if (!create && direct && offset >= i_size_read(inode))
return 0;
error = xfs_iomap(XFS_I(inode), offset, size,
create ? flags : BMAPI_READ, &imap, &nimap, &new);
if (direct && create)
flags |= BMAPI_DIRECT;
error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
&new);
if (error)
return -error;
if (nimap == 0)
@ -1579,8 +1398,7 @@ xfs_get_blocks(
struct buffer_head *bh_result,
int create)
{
return __xfs_get_blocks(inode, iblock,
bh_result, create, 0, BMAPI_WRITE);
return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}
STATIC int
@ -1590,54 +1408,30 @@ xfs_get_blocks_direct(
struct buffer_head *bh_result,
int create)
{
return __xfs_get_blocks(inode, iblock,
bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
/*
* Complete a direct I/O write request.
*
* If the private argument is non-NULL __xfs_get_blocks signals us that we
* need to issue a transaction to convert the range from unwritten to written
* extents. In case this is regular synchronous I/O we just call xfs_end_io
* to do this and we are done. But in case this was a successful AIO
* request this handler is called from interrupt context, from which we
* can't start transactions. In that case offload the I/O completion to
* the workqueues we also use for buffered I/O completion.
*/
STATIC void
xfs_end_io_direct(
struct kiocb *iocb,
loff_t offset,
ssize_t size,
void *private)
xfs_end_io_direct_write(
struct kiocb *iocb,
loff_t offset,
ssize_t size,
void *private,
int ret,
bool is_async)
{
xfs_ioend_t *ioend = iocb->private;
/*
* Non-NULL private data means we need to issue a transaction to
* convert a range from unwritten to written extents. This needs
* to happen from process context but aio+dio I/O completion
* happens from irq context so we need to defer it to a workqueue.
* This is not necessary for synchronous direct I/O, but we do
* it anyway to keep the code uniform and simpler.
*
* Well, if only it were that simple. Because synchronous direct I/O
* requires extent conversion to occur *before* we return to userspace,
* we have to wait for extent conversion to complete. Look at the
* iocb that has been passed to us to determine if this is AIO or
* not. If it is synchronous, tell xfs_finish_ioend() to kick the
* workqueue and wait for it to complete.
*
* The core direct I/O code might be changed to always call the
* completion handler in the future, in which case all this can
* go away.
*/
ioend->io_offset = offset;
ioend->io_size = size;
if (ioend->io_type == IO_READ) {
xfs_finish_ioend(ioend, 0);
} else if (private && size > 0) {
xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
} else {
/*
* A direct I/O write ioend starts it's life in unwritten
* state in case they map an unwritten extent. This write
* didn't map an unwritten extent so switch it's completion
* handler.
*/
ioend->io_type = IO_NEW;
xfs_finish_ioend(ioend, 0);
}
struct xfs_ioend *ioend = iocb->private;
/*
* blockdev_direct_IO can return an error even after the I/O
@ -1645,6 +1439,28 @@ xfs_end_io_direct(
* against double-freeing.
*/
iocb->private = NULL;
ioend->io_offset = offset;
ioend->io_size = size;
if (private && size > 0)
ioend->io_type = IO_UNWRITTEN;
if (is_async) {
/*
* If we are converting an unwritten extent we need to delay
* the AIO completion until after the unwritten extent
* conversion has completed, otherwise do it ASAP.
*/
if (ioend->io_type == IO_UNWRITTEN) {
ioend->io_iocb = iocb;
ioend->io_result = ret;
} else {
aio_complete(iocb, ret, 0);
}
xfs_finish_ioend(ioend);
} else {
xfs_finish_ioend_sync(ioend);
}
}
STATIC ssize_t
@ -1655,23 +1471,26 @@ xfs_vm_direct_IO(
loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct block_device *bdev;
ssize_t ret;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
ssize_t ret;
bdev = xfs_find_bdev_for_inode(inode);
if (rw & WRITE) {
iocb->private = xfs_alloc_ioend(inode, IO_NEW);
iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
IO_UNWRITTEN : IO_READ);
ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
offset, nr_segs,
xfs_get_blocks_direct,
xfs_end_io_direct_write);
if (ret != -EIOCBQUEUED && iocb->private)
xfs_destroy_ioend(iocb->private);
} else {
ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
offset, nr_segs,
xfs_get_blocks_direct,
NULL);
}
ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
offset, nr_segs,
xfs_get_blocks_direct,
xfs_end_io_direct);
if (unlikely(ret != -EIOCBQUEUED && iocb->private))
xfs_destroy_ioend(iocb->private);
return ret;
}
@ -1686,8 +1505,8 @@ xfs_vm_write_begin(
void **fsdata)
{
*pagep = NULL;
return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
xfs_get_blocks);
return block_write_begin(file, mapping, pos, len, flags | AOP_FLAG_NOFS,
pagep, fsdata, xfs_get_blocks);
}
STATIC sector_t
@ -1698,7 +1517,7 @@ xfs_vm_bmap(
struct inode *inode = (struct inode *)mapping->host;
struct xfs_inode *ip = XFS_I(inode);
xfs_itrace_entry(XFS_I(inode));
trace_xfs_vm_bmap(XFS_I(inode));
xfs_ilock(ip, XFS_IOLOCK_SHARED);
xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);

fs/xfs/linux-2.6/xfs_aops.h

@ -37,6 +37,8 @@ typedef struct xfs_ioend {
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
struct work_struct io_work; /* xfsdatad work queue */
struct kiocb *io_iocb;
int io_result;
} xfs_ioend_t;
extern const struct address_space_operations xfs_address_space_operations;
@ -45,6 +47,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
extern void xfs_ioend_init(void);
extern void xfs_ioend_wait(struct xfs_inode *);
extern void xfs_count_page_state(struct page *, int *, int *, int *);
extern void xfs_count_page_state(struct page *, int *, int *);
#endif /* __XFS_AOPS_H__ */

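These two fields are what make the deferred AIO completion in fs/xfs/linux-2.6/xfs_aops.c work: the iocb and its result are parked on the ioend until the unwritten extent conversion has run. A condensed view of the async direct write path, assembled from the hunks above:

/*
 * xfs_vm_direct_IO()              allocates the ioend as IO_UNWRITTEN
 *   xfs_end_io_direct_write()     irq context, is_async == true:
 *       ioend->io_iocb   = iocb;      defer the aio_complete() call
 *       ioend->io_result = ret;
 *       xfs_finish_ioend(ioend);      queue to xfsconvertd
 *   xfs_end_io()                  workqueue: converts the unwritten
 *                                 extent, then completes the request:
 *       aio_complete(ioend->io_iocb, ioend->io_result, 0);
 *       xfs_destroy_ioend(ioend);
 */
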
fs/xfs/linux-2.6/xfs_buf.c

@ -39,7 +39,6 @@
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
@ -579,9 +578,9 @@ _xfs_buf_read(
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
status = xfs_buf_iorequest(bp);
if (!status && !(flags & XBF_ASYNC))
status = xfs_buf_iowait(bp);
return status;
if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
return status;
return xfs_buf_iowait(bp);
}
xfs_buf_t *
@ -897,36 +896,6 @@ xfs_buf_unlock(
trace_xfs_buf_unlock(bp, _RET_IP_);
}
/*
* Pinning Buffer Storage in Memory
* Ensure that no attempt to force a buffer to disk will succeed.
*/
void
xfs_buf_pin(
xfs_buf_t *bp)
{
trace_xfs_buf_pin(bp, _RET_IP_);
atomic_inc(&bp->b_pin_count);
}
void
xfs_buf_unpin(
xfs_buf_t *bp)
{
trace_xfs_buf_unpin(bp, _RET_IP_);
if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&bp->b_waiters);
}
int
xfs_buf_ispin(
xfs_buf_t *bp)
{
return atomic_read(&bp->b_pin_count);
}
STATIC void
xfs_buf_wait_unpin(
xfs_buf_t *bp)
@ -1018,13 +987,12 @@ xfs_bwrite(
{
int error;
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
xfs_buf_delwri_dequeue(bp);
xfs_buf_iostrategy(bp);
xfs_bdstrat_cb(bp);
error = xfs_buf_iowait(bp);
if (error)
@ -1040,7 +1008,6 @@ xfs_bdwrite(
{
trace_xfs_buf_bdwrite(bp, _RET_IP_);
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags &= ~XBF_READ;
@ -1075,7 +1042,6 @@ xfs_bioerror(
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_CLR_BDSTRAT_FUNC(bp);
xfs_biodone(bp);
return EIO;
@ -1105,7 +1071,6 @@ xfs_bioerror_relse(
XFS_BUF_DONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_CLR_IODONE_FUNC(bp);
XFS_BUF_CLR_BDSTRAT_FUNC(bp);
if (!(fl & XBF_ASYNC)) {
/*
* Mark b_error and B_ERROR _both_.
@ -1311,8 +1276,19 @@ submit_io:
if (size)
goto next_chunk;
} else {
bio_put(bio);
/*
* if we get here, no pages were added to the bio. However,
* we can't just error out here - if the pages are locked then
* we have to unlock them otherwise we can hang on a later
* access to the page.
*/
xfs_buf_ioerror(bp, EIO);
if (bp->b_flags & _XBF_PAGE_LOCKED) {
int i;
for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->b_pages[i]);
}
bio_put(bio);
}
}
@ -1804,7 +1780,7 @@ xfs_buf_delwri_split(
trace_xfs_buf_delwri_split(bp, _RET_IP_);
ASSERT(bp->b_flags & XBF_DELWRI);
if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
@ -1889,7 +1865,7 @@ xfsbufd(
struct xfs_buf *bp;
bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_buf_iostrategy(bp);
xfs_bdstrat_cb(bp);
count++;
}
if (count)
@ -1936,7 +1912,7 @@ xfs_flush_buftarg(
bp->b_flags &= ~XBF_ASYNC;
list_add(&bp->b_list, &wait_list);
}
xfs_buf_iostrategy(bp);
xfs_bdstrat_cb(bp);
}
if (wait) {

fs/xfs/linux-2.6/xfs_buf.h

@ -44,57 +44,57 @@ typedef enum {
XBRW_ZERO = 3, /* Zero target memory */
} xfs_buf_rw_t;
typedef enum {
XBF_READ = (1 << 0), /* buffer intended for reading from device */
XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
XBF_ORDERED = (1 << 11), /* use ordered writes */
XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log */
#define XBF_READ (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
#define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
#define XBF_ORDERED (1 << 11)/* use ordered writes */
#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
/* flags used only as arguments to access routines */
XBF_LOCK = (1 << 14), /* lock requested */
XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
/* flags used only as arguments to access routines */
#define XBF_LOCK (1 << 14)/* lock requested */
#define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */
#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
/* flags used only internally */
_XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
_XBF_PAGES = (1 << 18), /* backed by refcounted pages */
_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
_XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
/* flags used only internally */
#define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */
#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
/*
* Special flag for supporting metadata blocks smaller than a FSB.
*
* In this case we can have multiple xfs_buf_t on a single page and
* need to lock out concurrent xfs_buf_t readers as they only
* serialise access to the buffer.
*
* If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
* between reads of the page. Hence we can have one thread read the
* page and modify it, but then race with another thread that thinks
* the page is not up-to-date and hence reads it again.
*
* The result is that the first modifcation to the page is lost.
* This sort of AGF/AGI reading race can happen when unlinking inodes
* that require truncation and results in the AGI unlinked list
* modifications being lost.
*/
_XBF_PAGE_LOCKED = (1 << 22),
/*
* Special flag for supporting metadata blocks smaller than a FSB.
*
* In this case we can have multiple xfs_buf_t on a single page and
* need to lock out concurrent xfs_buf_t readers as they only
* serialise access to the buffer.
*
* If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
* between reads of the page. Hence we can have one thread read the
* page and modify it, but then race with another thread that thinks
* the page is not up-to-date and hence reads it again.
*
* The result is that the first modification to the page is lost.
* This sort of AGF/AGI reading race can happen when unlinking inodes
* that require truncation and results in the AGI unlinked list
* modifications being lost.
*/
#define _XBF_PAGE_LOCKED (1 << 22)
/*
* If we try a barrier write, but it fails we have to communicate
* this to the upper layers. Unfortunately b_error gets overwritten
* when the buffer is re-issued so we have to add another flag to
* keep this information.
*/
_XFS_BARRIER_FAILED = (1 << 23),
} xfs_buf_flags_t;
/*
* If we try a barrier write, but it fails we have to communicate
* this to the upper layers. Unfortunately b_error gets overwritten
* when the buffer is re-issued so we have to add another flag to
* keep this information.
*/
#define _XFS_BARRIER_FAILED (1 << 23)
typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
{ XBF_READ, "READ" }, \
@ -187,7 +187,6 @@ typedef struct xfs_buf {
atomic_t b_io_remaining; /* #outstanding I/O requests */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
xfs_buf_relse_t b_relse; /* releasing function */
xfs_buf_bdstrat_t b_strat; /* pre-write function */
struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
void *b_fspriv2;
@ -245,11 +244,6 @@ extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
{
return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
}
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
return bp ? bp->b_error : ENOMEM;
@ -258,11 +252,6 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp)
/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
/* Pinning Buffer Storage in Memory */
extern void xfs_buf_pin(xfs_buf_t *);
extern void xfs_buf_unpin(xfs_buf_t *);
extern int xfs_buf_ispin(xfs_buf_t *);
/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
extern void xfs_buf_delwri_promote(xfs_buf_t *);
@ -326,8 +315,6 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
@ -351,7 +338,7 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
@ -370,8 +357,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
xfs_buf_rele(bp);
}
#define xfs_bpin(bp) xfs_buf_pin(bp)
#define xfs_bunpin(bp) xfs_buf_unpin(bp)
#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
#define xfs_biomove(bp, off, len, data, rw) \

xfs_dmapi_priv.h (deleted)

@ -1,28 +0,0 @@
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_DMAPI_PRIV_H__
#define __XFS_DMAPI_PRIV_H__
/*
* Based on IO_ISDIRECT, decide which i_ flag is set.
*/
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
DM_FLAGS_IMUX : 0)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif /*__XFS_DMAPI_PRIV_H__*/

fs/xfs/linux-2.6/xfs_export.c

@ -23,13 +23,13 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_export.h"
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
/*
* Note that we only accept fileids which are long enough rather than allow
@ -132,8 +132,7 @@ xfs_nfs_get_inode(
* fine and not an indication of a corrupted filesystem as clients can
* send invalid file handles and we have to handle it gracefully..
*/
error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
XFS_ILOCK_SHARED, &ip);
error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
if (error) {
/*
* EINVAL means the inode cluster doesn't exist anymore.
@ -148,11 +147,10 @@ xfs_nfs_get_inode(
}
if (ip->i_d.di_gen != generation) {
xfs_iput_new(ip, XFS_ILOCK_SHARED);
IRELE(ip);
return ERR_PTR(-ENOENT);
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return VFS_I(ip);
}

fs/xfs/linux-2.6/xfs_file.c

@ -22,23 +22,15 @@
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
@ -108,7 +100,7 @@ xfs_file_fsync(
int error = 0;
int log_flushed = 0;
xfs_itrace_entry(ip);
trace_xfs_file_fsync(ip);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -XFS_ERROR(EIO);
@ -166,8 +158,7 @@ xfs_file_fsync(
* transaction. So we play it safe and fire off the
* transaction anyway.
*/
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
error = _xfs_trans_commit(tp, 0, &log_flushed);
@ -275,20 +266,6 @@ xfs_file_aio_read(
mutex_lock(&inode->i_mutex);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
int iolock = XFS_IOLOCK_SHARED;
ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
dmflags, &iolock);
if (ret) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
if (unlikely(ioflags & IO_ISDIRECT))
mutex_unlock(&inode->i_mutex);
return ret;
}
}
if (unlikely(ioflags & IO_ISDIRECT)) {
if (inode->i_mapping->nrpages) {
ret = -xfs_flushinval_pages(ip,
@ -321,7 +298,6 @@ xfs_file_splice_read(
unsigned int flags)
{
struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
struct xfs_mount *mp = ip->i_mount;
int ioflags = 0;
ssize_t ret;
@ -335,18 +311,6 @@ xfs_file_splice_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
int iolock = XFS_IOLOCK_SHARED;
int error;
error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
FILP_DELAY_FLAG(infilp), &iolock);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
}
}
trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
@ -367,7 +331,6 @@ xfs_file_splice_write(
{
struct inode *inode = outfilp->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
xfs_fsize_t isize, new_size;
int ioflags = 0;
ssize_t ret;
@ -382,18 +345,6 @@ xfs_file_splice_write(
xfs_ilock(ip, XFS_IOLOCK_EXCL);
if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
int iolock = XFS_IOLOCK_EXCL;
int error;
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
FILP_DELAY_FLAG(outfilp), &iolock);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return -error;
}
}
new_size = *ppos + count;
xfs_ilock(ip, XFS_ILOCK_EXCL);
@ -463,7 +414,7 @@ xfs_zero_last_block(
last_fsb = XFS_B_TO_FSBT(mp, isize);
nimaps = 1;
error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
&nimaps, NULL, NULL);
&nimaps, NULL);
if (error) {
return error;
}
@ -558,7 +509,7 @@ xfs_zero_eof(
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
0, NULL, 0, &imap, &nimaps, NULL, NULL);
0, NULL, 0, &imap, &nimaps, NULL);
if (error) {
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
return error;
@ -627,7 +578,6 @@ xfs_file_aio_write(
int ioflags = 0;
xfs_fsize_t isize, new_size;
int iolock;
int eventsent = 0;
size_t ocount = 0, count;
int need_i_mutex;
@ -673,33 +623,6 @@ start:
goto out_unlock_mutex;
}
if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
!(ioflags & IO_INVIS) && !eventsent)) {
int dmflags = FILP_DELAY_FLAG(file);
if (need_i_mutex)
dmflags |= DM_FLAGS_IMUX;
xfs_iunlock(ip, XFS_ILOCK_EXCL);
error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
pos, count, dmflags, &iolock);
if (error) {
goto out_unlock_internal;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
eventsent = 1;
/*
* The iolock was dropped and reacquired in XFS_SEND_DATA
* so we have to recheck the size when appending.
* We will only "goto start;" once, since having sent the
* event prevents another call to XFS_SEND_DATA, which is
* what allows the size to change in the first place.
*/
if ((file->f_flags & O_APPEND) && pos != ip->i_size)
goto start;
}
if (ioflags & IO_ISDIRECT) {
xfs_buftarg_t *target =
XFS_IS_REALTIME_INODE(ip) ?
@ -830,22 +753,6 @@ write_retry:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
if (ret == -ENOSPC &&
DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
xfs_iunlock(ip, iolock);
if (need_i_mutex)
mutex_unlock(&inode->i_mutex);
error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip,
DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (need_i_mutex)
mutex_lock(&inode->i_mutex);
xfs_ilock(ip, iolock);
if (error)
goto out_unlock_internal;
goto start;
}
error = -ret;
if (ret <= 0)
goto out_unlock_internal;
@ -1014,9 +921,6 @@ const struct file_operations xfs_file_operations = {
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
.open_exec = xfs_file_open_exec,
#endif
};
const struct file_operations xfs_dir_file_operations = {

fs/xfs/linux-2.6/xfs_fs_subr.c

@ -21,10 +21,6 @@
#include "xfs_inode.h"
#include "xfs_trace.h"
int fs_noerr(void) { return 0; }
int fs_nosys(void) { return ENOSYS; }
void fs_noval(void) { return; }
/*
* note: all filemap functions return negative error codes. These
* need to be inverted before returning to the xfs core functions.

fs/xfs/linux-2.6/xfs_fs_subr.h (deleted)

@ -1,25 +0,0 @@
/*
* Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_FS_SUBR_H__
#define __XFS_FS_SUBR_H__
extern int fs_noerr(void);
extern int fs_nosys(void);
extern void fs_noval(void);
#endif /* __XFS_FS_SUBR_H__ */

fs/xfs/linux-2.6/xfs_ioctl.c

@ -23,24 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_buf_item.h"
@ -908,7 +899,7 @@ xfs_ioctl_setattr(
struct xfs_dquot *olddquot = NULL;
int code;
xfs_itrace_entry(ip);
trace_xfs_ioctl_setattr(ip);
if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
@ -1043,8 +1034,7 @@ xfs_ioctl_setattr(
}
}
xfs_trans_ijoin(tp, ip, lock_flags);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
/*
* Change file ownership. Must be the owner or privileged.
@ -1116,16 +1106,7 @@ xfs_ioctl_setattr(
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
if (code)
return code;
if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) {
XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0,
(mask & FSX_NONBLOCK) ? DM_FLAGS_NDELAY : 0);
}
return 0;
return code;
error_return:
xfs_qm_dqrele(udqp);
@ -1301,7 +1282,7 @@ xfs_file_ioctl(
if (filp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
xfs_itrace_entry(ip);
trace_xfs_file_ioctl(ip);
switch (cmd) {
case XFS_IOC_ALLOCSP:

fs/xfs/linux-2.6/xfs_ioctl32.c

@ -28,12 +28,8 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_vnode.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@ -544,7 +540,7 @@ xfs_file_compat_ioctl(
if (filp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
xfs_itrace_entry(ip);
trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
/* No size or alignment issues on any arch */

fs/xfs/linux-2.6/xfs_iops.c

@ -24,21 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
@ -496,7 +488,7 @@ xfs_vn_getattr(
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
xfs_itrace_entry(ip);
trace_xfs_getattr(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);

fs/xfs/linux-2.6/xfs_linux.h

@ -87,7 +87,6 @@
#include <xfs_aops.h>
#include <xfs_super.h>
#include <xfs_globals.h>
#include <xfs_fs_subr.h>
#include <xfs_buf.h>
/*

fs/xfs/linux-2.6/xfs_quotaops.c

@ -16,7 +16,6 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_dmapi.h"
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"

fs/xfs/linux-2.6/xfs_super.c

@ -25,14 +25,11 @@
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@ -43,7 +40,6 @@
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
@ -94,7 +90,6 @@ mempool_t *xfs_ioend_pool;
#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
* unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
@ -116,9 +111,6 @@ mempool_t *xfs_ioend_pool;
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
@ -172,15 +164,13 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
STATIC int
xfs_parseargs(
struct xfs_mount *mp,
char *options,
char **mtpt)
char *options)
{
struct super_block *sb = mp->m_super;
char *this_char, *value, *eov;
int dsunit = 0;
int dswidth = 0;
int iosize = 0;
int dmapi_implies_ikeep = 1;
__uint8_t iosizelog = 0;
/*
@ -243,15 +233,10 @@ xfs_parseargs(
if (!mp->m_logname)
return ENOMEM;
} else if (!strcmp(this_char, MNTOPT_MTPT)) {
if (!value || !*value) {
cmn_err(CE_WARN,
"XFS: %s option requires an argument",
this_char);
return EINVAL;
}
*mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
if (!*mtpt)
return ENOMEM;
cmn_err(CE_WARN,
"XFS: %s option not allowed on this system",
this_char);
return EINVAL;
} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
if (!value || !*value) {
cmn_err(CE_WARN,
@ -288,8 +273,6 @@ xfs_parseargs(
mp->m_flags &= ~XFS_MOUNT_GRPID;
} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
mp->m_flags |= XFS_MOUNT_WSYNC;
} else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
mp->m_flags |= XFS_MOUNT_NORECOVERY;
} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
@ -329,7 +312,6 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
mp->m_flags |= XFS_MOUNT_IKEEP;
} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
dmapi_implies_ikeep = 0;
mp->m_flags &= ~XFS_MOUNT_IKEEP;
} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
@ -370,12 +352,6 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_OQUOTA_ENFD;
} else if (!strcmp(this_char, MNTOPT_DMAPI)) {
mp->m_flags |= XFS_MOUNT_DMAPI;
} else if (!strcmp(this_char, MNTOPT_XDSM)) {
mp->m_flags |= XFS_MOUNT_DMAPI;
} else if (!strcmp(this_char, MNTOPT_DMI)) {
mp->m_flags |= XFS_MOUNT_DMAPI;
} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
mp->m_flags |= XFS_MOUNT_DELAYLOG;
cmn_err(CE_WARN,
@ -387,9 +363,11 @@ xfs_parseargs(
cmn_err(CE_WARN,
"XFS: ihashsize no longer used, option is deprecated.");
} else if (!strcmp(this_char, "osyncisdsync")) {
/* no-op, this is now the default */
cmn_err(CE_WARN,
"XFS: osyncisdsync is now the default, option is deprecated.");
"XFS: osyncisdsync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "osyncisosync")) {
cmn_err(CE_WARN,
"XFS: osyncisosync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "irixsgid")) {
cmn_err(CE_WARN,
"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
@ -430,12 +408,6 @@ xfs_parseargs(
return EINVAL;
}
if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
printk("XFS: %s option needs the mount point option as well\n",
MNTOPT_DMAPI);
return EINVAL;
}
if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
cmn_err(CE_WARN,
"XFS: sunit and swidth must be specified together");
@ -449,18 +421,6 @@ xfs_parseargs(
return EINVAL;
}
/*
* Applications using DMI filesystems often expect the
* inode generation number to be monotonically increasing.
* If we delete inode chunks we break this assumption, so
* keep unused inode chunks on disk for DMI filesystems
* until we come up with a better solution.
* Note that if "ikeep" or "noikeep" mount options are
* supplied, then they are honored.
*/
if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
mp->m_flags |= XFS_MOUNT_IKEEP;
done:
if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
/*
@ -539,10 +499,8 @@ xfs_showargs(
{ XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
{ XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
{ XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
{ XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
{ XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
{ XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
{ XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI },
{ XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
{ XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
{ 0, NULL }
@ -947,7 +905,7 @@ xfs_fs_destroy_inode(
{
struct xfs_inode *ip = XFS_I(inode);
xfs_itrace_entry(ip);
trace_xfs_destroy_inode(ip);
XFS_STATS_INC(vn_reclaim);
@ -1063,10 +1021,8 @@ xfs_log_inode(
* an inode in another recent transaction. So we play it safe and
* fire off the transaction anyway.
*/
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
@ -1082,27 +1038,18 @@ xfs_fs_write_inode(
struct xfs_mount *mp = ip->i_mount;
int error = EAGAIN;
xfs_itrace_entry(ip);
trace_xfs_write_inode(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (wbc->sync_mode == WB_SYNC_ALL) {
/*
* Make sure the inode has hit stable storage. By using the
* log and the fsync transactions we reduce the IOs we have
* to do here from two (log and inode) to just the log.
*
* Note: We still need to do a delwri write of the inode after
* this to flush it to the backing buffer so that bulkstat
* works properly if this is the first time the inode has been
* written. Because we hold the ilock atomically over the
* transaction commit and the inode flush we are guaranteed
* that the inode is not pinned when it returns. If the flush
* lock is already held, then the inode has already been
* flushed once and we don't need to flush it again. Hence
* the code will only flush the inode if it isn't already
* being flushed.
* Make sure the inode has made it into the log. Instead
* of forcing it all the way to stable storage using a
* synchronous transaction, we let the log force inside the
* ->sync_fs call do that for us, which reduces the number
* of synchronous log forces dramatically.
*/
xfs_ioend_wait(ip);
xfs_ilock(ip, XFS_ILOCK_SHARED);
@ -1116,27 +1063,29 @@ xfs_fs_write_inode(
* We make this non-blocking if the inode is contended, return
* EAGAIN to indicate to the caller that they did not succeed.
* This prevents the flush path from blocking on inodes inside
* another operation right now, they get caught later by xfs_sync.
* another operation right now, they get caught later by
* xfs_sync.
*/
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
goto out;
}
if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
goto out_unlock;
if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
goto out_unlock;
/*
* Now we have the flush lock and the inode is not pinned, we can check
* if the inode is really clean as we know that there are no pending
* transaction completions, it is not waiting on the delayed write
* queue and there is no IO in progress.
*/
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
error = 0;
goto out_unlock;
/*
* Now we have the flush lock and the inode is not pinned, we
* can check if the inode is really clean as we know that
* there are no pending transaction completions, it is not
* waiting on the delayed write queue and there is no IO in
* progress.
*/
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
error = 0;
goto out_unlock;
}
error = xfs_iflush(ip, 0);
}
error = xfs_iflush(ip, 0);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@ -1156,7 +1105,8 @@ xfs_fs_clear_inode(
{
xfs_inode_t *ip = XFS_I(inode);
xfs_itrace_entry(ip);
trace_xfs_clear_inode(ip);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
@ -1193,22 +1143,13 @@ xfs_fs_put_super(
{
struct xfs_mount *mp = XFS_M(sb);
/*
* Unregister the memory shrinker before we tear down the mount
* structure so we don't have memory reclaim racing with us here.
*/
xfs_inode_shrinker_unregister(mp);
xfs_syncd_stop(mp);
if (!(sb->s_flags & MS_RDONLY)) {
/*
* XXX(hch): this should be SYNC_WAIT.
*
* Or more likely not needed at all because the VFS is already
* calling ->sync_fs after shutting down all filesystem
* operations and just before calling ->put_super.
*/
xfs_sync_data(mp, 0);
xfs_sync_attr(mp, 0);
}
XFS_SEND_PREUNMOUNT(mp);
/*
* Blow away any referenced inode in the filestreams cache.
* This can and will cause log traffic as inodes go inactive
@ -1218,14 +1159,10 @@ xfs_fs_put_super(
XFS_bflush(mp->m_ddev_targp);
XFS_SEND_UNMOUNT(mp);
xfs_unmountfs(mp);
xfs_freesb(mp);
xfs_inode_shrinker_unregister(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
xfs_dmops_put(mp);
xfs_free_fsname(mp);
kfree(mp);
}
@ -1543,7 +1480,6 @@ xfs_fs_fill_super(
struct inode *root;
struct xfs_mount *mp = NULL;
int flags = 0, error = ENOMEM;
char *mtpt = NULL;
mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
if (!mp)
@ -1559,7 +1495,7 @@ xfs_fs_fill_super(
mp->m_super = sb;
sb->s_fs_info = mp;
error = xfs_parseargs(mp, (char *)data, &mtpt);
error = xfs_parseargs(mp, (char *)data);
if (error)
goto out_free_fsname;
@ -1571,16 +1507,12 @@ xfs_fs_fill_super(
#endif
sb->s_op = &xfs_super_operations;
error = xfs_dmops_get(mp);
if (error)
goto out_free_fsname;
if (silent)
flags |= XFS_MFSI_QUIET;
error = xfs_open_devices(mp);
if (error)
goto out_put_dmops;
goto out_free_fsname;
if (xfs_icsb_init_counters(mp))
mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
@ -1608,8 +1540,6 @@ xfs_fs_fill_super(
if (error)
goto out_filestream_unmount;
XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
sb->s_magic = XFS_SB_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@ -1638,7 +1568,6 @@ xfs_fs_fill_super(
xfs_inode_shrinker_register(mp);
kfree(mtpt);
return 0;
out_filestream_unmount:
@ -1648,11 +1577,8 @@ xfs_fs_fill_super(
out_destroy_counters:
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
out_put_dmops:
xfs_dmops_put(mp);
out_free_fsname:
xfs_free_fsname(mp);
kfree(mtpt);
kfree(mp);
out:
return -error;
@ -1759,6 +1685,12 @@ xfs_init_zones(void)
if (!xfs_trans_zone)
goto out_destroy_ifork_zone;
xfs_log_item_desc_zone =
kmem_zone_init(sizeof(struct xfs_log_item_desc),
"xfs_log_item_desc");
if (!xfs_log_item_desc_zone)
goto out_destroy_trans_zone;
/*
* The size of the zone allocated buf log item is the maximum
* size possible under XFS. This wastes a little bit of memory,
@ -1768,7 +1700,7 @@ xfs_init_zones(void)
(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
NBWORD) * sizeof(int))), "xfs_buf_item");
if (!xfs_buf_item_zone)
goto out_destroy_trans_zone;
goto out_destroy_log_item_desc_zone;
xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
((XFS_EFD_MAX_FAST_EXTENTS - 1) *
@ -1805,6 +1737,8 @@ xfs_init_zones(void)
kmem_zone_destroy(xfs_efd_zone);
out_destroy_buf_item_zone:
kmem_zone_destroy(xfs_buf_item_zone);
out_destroy_log_item_desc_zone:
kmem_zone_destroy(xfs_log_item_desc_zone);
out_destroy_trans_zone:
kmem_zone_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
@ -1835,6 +1769,7 @@ xfs_destroy_zones(void)
kmem_zone_destroy(xfs_efi_zone);
kmem_zone_destroy(xfs_efd_zone);
kmem_zone_destroy(xfs_buf_item_zone);
kmem_zone_destroy(xfs_log_item_desc_zone);
kmem_zone_destroy(xfs_trans_zone);
kmem_zone_destroy(xfs_ifork_zone);
kmem_zone_destroy(xfs_dabuf_zone);

@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
# define XFS_BIGFS_STRING
#endif
#ifdef CONFIG_XFS_DMAPI
# define XFS_DMAPI_STRING "dmapi support, "
#else
# define XFS_DMAPI_STRING
#endif
#ifdef DEBUG
# define XFS_DBG_STRING "debug"
#else
@ -72,7 +66,6 @@ extern void xfs_qm_exit(void);
XFS_SECURITY_STRING \
XFS_REALTIME_STRING \
XFS_BIGFS_STRING \
XFS_DMAPI_STRING \
XFS_DBG_STRING /* DBG must be last */
struct xfs_inode;

@ -24,25 +24,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
@ -319,7 +308,7 @@ xfs_sync_inode_attr(
/*
* Write out pagecache data for the whole filesystem.
*/
int
STATIC int
xfs_sync_data(
struct xfs_mount *mp,
int flags)
@ -340,7 +329,7 @@ xfs_sync_data(
/*
* Write out inode metadata (attributes) for the whole filesystem.
*/
int
STATIC int
xfs_sync_attr(
struct xfs_mount *mp,
int flags)
@ -373,8 +362,7 @@ xfs_commit_dummy_trans(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp, 0);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@ -867,7 +855,36 @@ out:
reclaim:
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_ireclaim(ip);
XFS_STATS_INC(xs_ig_reclaims);
/*
* Remove the inode from the per-AG radix tree.
*
* Because radix_tree_delete won't complain even if the item was never
* added to the tree, assert that it was there before deleting it to
* catch problems with the inode life cycle early on.
*/
write_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
ASSERT(0);
write_unlock(&pag->pag_ici_lock);
/*
* Here we do an (almost) spurious inode lock in order to coordinate
* with inode cache radix tree lookups. This is because the lookup
* can reference the inodes in the cache without taking references.
*
* We make that OK here by ensuring that we wait until the inode is
* unlocked after the lookup before we go ahead and free it. We get
* both the ilock and the iolock because the code may need to drop the
* ilock but will still hold the iolock.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_inode_free(ip);
return error;
}
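
The "almost spurious" lock above is a handshake with lockless radix tree lookups rather than real mutual exclusion. A minimal userspace sketch of the idea, with a pthread mutex standing in for the inode locks (hedged assumption: the object has already been removed from the lookup structure, so no new lookup can start):

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t	lock;
	int		data;
};

/* Lookup side: examines the object under its lock, without taking
 * a reference count. */
static int peek(struct obj *o)
{
	int v;

	pthread_mutex_lock(&o->lock);
	v = o->data;
	pthread_mutex_unlock(&o->lock);
	return v;
}

/* Reclaim side: taking and releasing the same lock guarantees any
 * in-flight peek() has finished before the memory goes away. */
static void reclaim(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	pthread_mutex_unlock(&o->lock);
	pthread_mutex_destroy(&o->lock);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	pthread_mutex_init(&o->lock, NULL);
	o->data = 42;
	(void)peek(o);	/* in the kernel this would race from another thread */
	reclaim(o);
	return 0;
}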

@ -35,9 +35,6 @@ typedef struct xfs_sync_work {
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
int xfs_sync_attr(struct xfs_mount *mp, int flags);
int xfs_sync_data(struct xfs_mount *mp, int flags);
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);

@ -24,17 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"

@ -317,8 +317,6 @@ DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_pin);
DEFINE_BUF_EVENT(xfs_buf_unpin);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_iorequest);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
@ -541,7 +539,7 @@ DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
DECLARE_EVENT_CLASS(xfs_iget_class,
DECLARE_EVENT_CLASS(xfs_inode_class,
TP_PROTO(struct xfs_inode *ip),
TP_ARGS(ip),
TP_STRUCT__entry(
@ -557,16 +555,38 @@ DECLARE_EVENT_CLASS(xfs_iget_class,
__entry->ino)
)
#define DEFINE_IGET_EVENT(name) \
DEFINE_EVENT(xfs_iget_class, name, \
#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
TP_PROTO(struct xfs_inode *ip), \
TP_ARGS(ip))
DEFINE_IGET_EVENT(xfs_iget_skip);
DEFINE_IGET_EVENT(xfs_iget_reclaim);
DEFINE_IGET_EVENT(xfs_iget_found);
DEFINE_IGET_EVENT(xfs_iget_alloc);
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_reclaim);
DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);
DECLARE_EVENT_CLASS(xfs_inode_class,
DEFINE_INODE_EVENT(xfs_getattr);
DEFINE_INODE_EVENT(xfs_setattr);
DEFINE_INODE_EVENT(xfs_readlink);
DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_check_acl);
#endif
DEFINE_INODE_EVENT(xfs_vm_bmap);
DEFINE_INODE_EVENT(xfs_file_ioctl);
DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
DEFINE_INODE_EVENT(xfs_write_inode);
DEFINE_INODE_EVENT(xfs_clear_inode);
DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
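
The DECLARE_EVENT_CLASS/DEFINE_EVENT split above shares one event body between many named tracepoints. A self-contained userspace analogue of the macro pattern (illustrative only; the real ftrace macros also generate the ring-buffer and format plumbing):

#include <stdio.h>

/* One shared body per class ... */
#define DECLARE_EVENT_CLASS(class, fmt)					\
	static void class##_emit(const char *ev, unsigned long ino)	\
	{ printf("%s: " fmt "\n", ev, ino); }

/* ... and one thin named wrapper per event. */
#define DEFINE_EVENT(class, name)					\
	static void trace_##name(unsigned long ino)			\
	{ class##_emit(#name, ino); }

DECLARE_EVENT_CLASS(inode_class, "ino 0x%lx")
DEFINE_EVENT(inode_class, iget_hit)
DEFINE_EVENT(inode_class, iget_miss)

int main(void)
{
	trace_iget_hit(0x80);	/* prints: iget_hit: ino 0x80 */
	trace_iget_miss(0x81);	/* prints: iget_miss: ino 0x81 */
	return 0;
}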
DECLARE_EVENT_CLASS(xfs_iref_class,
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
TP_ARGS(ip, caller_ip),
TP_STRUCT__entry(
@ -591,20 +611,71 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
(char *)__entry->caller_ip)
)
#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
#define DEFINE_IREF_EVENT(name) \
DEFINE_EVENT(xfs_iref_class, name, \
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
TP_ARGS(ip, caller_ip))
DEFINE_INODE_EVENT(xfs_ihold);
DEFINE_INODE_EVENT(xfs_irele);
DEFINE_INODE_EVENT(xfs_inode_pin);
DEFINE_INODE_EVENT(xfs_inode_unpin);
DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
DEFINE_IREF_EVENT(xfs_ihold);
DEFINE_IREF_EVENT(xfs_irele);
DEFINE_IREF_EVENT(xfs_inode_pin);
DEFINE_IREF_EVENT(xfs_inode_unpin);
DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
/* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
DEFINE_INODE_EVENT(xfs_inode);
#define xfs_itrace_entry(ip) \
trace_xfs_inode(ip, _THIS_IP_)
DECLARE_EVENT_CLASS(xfs_namespace_class,
TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
TP_ARGS(dp, name),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, dp_ino)
__dynamic_array(char, name, name->len)
),
TP_fast_assign(
__entry->dev = VFS_I(dp)->i_sb->s_dev;
__entry->dp_ino = dp->i_ino;
memcpy(__get_str(name), name->name, name->len);
),
TP_printk("dev %d:%d dp ino 0x%llx name %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->dp_ino,
__get_str(name))
)
#define DEFINE_NAMESPACE_EVENT(name) \
DEFINE_EVENT(xfs_namespace_class, name, \
TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
TP_ARGS(dp, name))
DEFINE_NAMESPACE_EVENT(xfs_remove);
DEFINE_NAMESPACE_EVENT(xfs_link);
DEFINE_NAMESPACE_EVENT(xfs_lookup);
DEFINE_NAMESPACE_EVENT(xfs_create);
DEFINE_NAMESPACE_EVENT(xfs_symlink);
TRACE_EVENT(xfs_rename,
TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
struct xfs_name *src_name, struct xfs_name *target_name),
TP_ARGS(src_dp, target_dp, src_name, target_name),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, src_dp_ino)
__field(xfs_ino_t, target_dp_ino)
__dynamic_array(char, src_name, src_name->len)
__dynamic_array(char, target_name, target_name->len)
),
TP_fast_assign(
__entry->dev = VFS_I(src_dp)->i_sb->s_dev;
__entry->src_dp_ino = src_dp->i_ino;
__entry->target_dp_ino = target_dp->i_ino;
memcpy(__get_str(src_name), src_name->name, src_name->len);
memcpy(__get_str(target_name), target_name->name, target_name->len);
),
TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
" src name %s target name %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->src_dp_ino,
__entry->target_dp_ino,
__get_str(src_name),
__get_str(target_name))
)
DECLARE_EVENT_CLASS(xfs_dquot_class,
TP_PROTO(struct xfs_dquot *dqp),
@ -684,9 +755,6 @@ DEFINE_DQUOT_EVENT(xfs_dqrele);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);
/* not really iget events, but we re-use the format */
DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
DECLARE_EVENT_CLASS(xfs_loggrant_class,
TP_PROTO(struct log *log, struct xlog_ticket *tic),
@ -834,33 +902,29 @@ DECLARE_EVENT_CLASS(xfs_page_class,
__field(loff_t, size)
__field(unsigned long, offset)
__field(int, delalloc)
__field(int, unmapped)
__field(int, unwritten)
),
TP_fast_assign(
int delalloc = -1, unmapped = -1, unwritten = -1;
int delalloc = -1, unwritten = -1;
if (page_has_buffers(page))
xfs_count_page_state(page, &delalloc,
&unmapped, &unwritten);
xfs_count_page_state(page, &delalloc, &unwritten);
__entry->dev = inode->i_sb->s_dev;
__entry->ino = XFS_I(inode)->i_ino;
__entry->pgoff = page_offset(page);
__entry->size = i_size_read(inode);
__entry->offset = off;
__entry->delalloc = delalloc;
__entry->unmapped = unmapped;
__entry->unwritten = unwritten;
),
TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
"delalloc %d unmapped %d unwritten %d",
"delalloc %d unwritten %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pgoff,
__entry->size,
__entry->offset,
__entry->delalloc,
__entry->unmapped,
__entry->unwritten)
)

@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
@ -64,8 +54,6 @@
flush lock - ditto.
*/
STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
@ -390,21 +378,14 @@ xfs_qm_dqalloc(
return (ESRCH);
}
/*
* xfs_trans_commit normally decrements the vnode ref count
* when it unlocks the inode. Since we want to keep the quota
* inode around, we bump the vnode ref count now.
*/
IHOLD(quotip);
xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL);
nmaps = 1;
if ((error = xfs_bmapi(tp, quotip,
offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
&firstblock,
XFS_QM_DQALLOC_SPACE_RES(mp),
&map, &nmaps, &flist, NULL))) {
&map, &nmaps, &flist))) {
goto error0;
}
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@ -520,7 +501,7 @@ xfs_qm_dqtobp(
error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB,
XFS_BMAPI_METADATA,
NULL, 0, &map, &nmaps, NULL, NULL);
NULL, 0, &map, &nmaps, NULL);
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
if (error)
@ -1141,6 +1122,46 @@ xfs_qm_dqrele(
xfs_qm_dqput(dqp);
}
/*
* This is the dquot flushing I/O completion routine. It is called
* from interrupt level when the buffer containing the dquot is
* flushed to disk. It is responsible for removing the dquot logitem
* from the AIL if it has not been re-logged, and unlocking the dquot's
* flush lock. This behavior is very similar to that of inodes.
*/
STATIC void
xfs_qm_dqflush_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip;
xfs_dquot_t *dqp = qip->qli_dquot;
struct xfs_ail *ailp = lip->li_ailp;
/*
* We only want to pull the item from the AIL if its
* location in the log has not changed since we started the flush.
* Thus, we only bother if the dquot's lsn has
* not changed. First we check the lsn outside the lock
* since it's cheaper, and then we recheck while
* holding the lock before removing the dquot from the AIL.
*/
if ((lip->li_flags & XFS_LI_IN_AIL) &&
lip->li_lsn == qip->qli_flush_lsn) {
/* xfs_trans_ail_delete() drops the AIL lock. */
spin_lock(&ailp->xa_lock);
if (lip->li_lsn == qip->qli_flush_lsn)
xfs_trans_ail_delete(ailp, lip);
else
spin_unlock(&ailp->xa_lock);
}
/*
* Release the dq's flush lock since we're done with it.
*/
xfs_dqfunlock(dqp);
}
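
The lsn test above is a double-checked pattern: a cheap unlocked comparison filters the common case, and the comparison is repeated under the AIL lock before the item is actually removed. A compilable sketch of the same shape (assumed simplification; a pthread mutex stands in for the AIL spinlock):

#include <pthread.h>

struct ail_item {
	long	li_lsn;
	int	on_ail;
};

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_if_not_relogged(struct ail_item *it, long flush_lsn)
{
	/* Unlocked peek: cheap, may race, errs on the side of doing nothing. */
	if (!it->on_ail || it->li_lsn != flush_lsn)
		return;

	pthread_mutex_lock(&ail_lock);
	/* Recheck under the lock before acting on the result. */
	if (it->li_lsn == flush_lsn)
		it->on_ail = 0;	/* stands in for xfs_trans_ail_delete() */
	pthread_mutex_unlock(&ail_lock);
}

int main(void)
{
	struct ail_item it = { .li_lsn = 7, .on_ail = 1 };

	remove_if_not_relogged(&it, 7);
	return it.on_ail;	/* 0: removed */
}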
/*
* Write a modified dquot to disk.
@ -1222,8 +1243,9 @@ xfs_qm_dqflush(
* Attach an iodone routine so that we can remove this dquot from the
* AIL and release the flush lock once the dquot is synced to disk.
*/
xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *))
xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item));
xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
&dqp->q_logitem.qli_item);
/*
* If the buffer is pinned then push on the log so we won't
* get stuck waiting in the write for too long.
@ -1247,50 +1269,6 @@ xfs_qm_dqflush(
}
/*
* This is the dquot flushing I/O completion routine. It is called
* from interrupt level when the buffer containing the dquot is
* flushed to disk. It is responsible for removing the dquot logitem
* from the AIL if it has not been re-logged, and unlocking the dquot's
* flush lock. This behavior is very similar to that of inodes.
*/
/*ARGSUSED*/
STATIC void
xfs_qm_dqflush_done(
xfs_buf_t *bp,
xfs_dq_logitem_t *qip)
{
xfs_dquot_t *dqp;
struct xfs_ail *ailp;
dqp = qip->qli_dquot;
ailp = qip->qli_item.li_ailp;
/*
* We only want to pull the item from the AIL if its
* location in the log has not changed since we started the flush.
* Thus, we only bother if the dquot's lsn has
* not changed. First we check the lsn outside the lock
* since it's cheaper, and then we recheck while
* holding the lock before removing the dquot from the AIL.
*/
if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
qip->qli_item.li_lsn == qip->qli_flush_lsn) {
/* xfs_trans_ail_delete() drops the AIL lock. */
spin_lock(&ailp->xa_lock);
if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip);
else
spin_unlock(&ailp->xa_lock);
}
/*
* Release the dq's flush lock since we're done with it.
*/
xfs_dqfunlock(dqp);
}
int
xfs_qm_dqlock_nowait(
xfs_dquot_t *dqp)

@ -23,42 +23,36 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_dq_logitem, qli_item);
}
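
DQUOT_ITEM() is the embedded-structure downcast that lets every callback below take a plain struct xfs_log_item * in place of the casted function pointers removed at the end of this file. A self-contained sketch of the idiom (simplified; the kernel's container_of also type-checks the member):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct log_item   { int li_flags; };
struct dq_logitem { int qli_dirty; struct log_item qli_item; };

static struct dq_logitem *DQUOT_ITEM(struct log_item *lip)
{
	return container_of(lip, struct dq_logitem, qli_item);
}

int main(void)
{
	struct dq_logitem d = { .qli_dirty = 1 };
	struct log_item *lip = &d.qli_item;	/* what a generic callback sees */

	/* ... and the callback recovers the containing structure: */
	printf("%d\n", DQUOT_ITEM(lip)->qli_dirty);	/* prints 1 */
	return 0;
}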
/*
* returns the number of iovecs needed to log the given dquot item.
*/
/* ARGSUSED */
STATIC uint
xfs_qm_dquot_logitem_size(
xfs_dq_logitem_t *logitem)
struct xfs_log_item *lip)
{
/*
* We need only two iovecs: one for the format, one for the real thing.
*/
return (2);
return 2;
}
/*
@ -66,22 +60,21 @@ xfs_qm_dquot_logitem_size(
*/
STATIC void
xfs_qm_dquot_logitem_format(
xfs_dq_logitem_t *logitem,
xfs_log_iovec_t *logvec)
struct xfs_log_item *lip,
struct xfs_log_iovec *logvec)
{
ASSERT(logitem);
ASSERT(logitem->qli_dquot);
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
logvec->i_addr = &qlip->qli_format;
logvec->i_len = sizeof(xfs_dq_logformat_t);
logvec->i_type = XLOG_REG_TYPE_QFORMAT;
logvec++;
logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
logvec->i_addr = &qlip->qli_dquot->q_core;
logvec->i_len = sizeof(xfs_disk_dquot_t);
logvec->i_type = XLOG_REG_TYPE_DQUOT;
ASSERT(2 == logitem->qli_item.li_desc->lid_size);
logitem->qli_format.qlf_size = 2;
ASSERT(2 == lip->li_desc->lid_size);
qlip->qli_format.qlf_size = 2;
}
@ -90,9 +83,9 @@ xfs_qm_dquot_logitem_format(
*/
STATIC void
xfs_qm_dquot_logitem_pin(
xfs_dq_logitem_t *logitem)
struct xfs_log_item *lip)
{
xfs_dquot_t *dqp = logitem->qli_dquot;
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
atomic_inc(&dqp->q_pincount);
@ -104,27 +97,18 @@ xfs_qm_dquot_logitem_pin(
* dquot must have been previously pinned with a call to
* xfs_qm_dquot_logitem_pin().
*/
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
xfs_dq_logitem_t *logitem)
struct xfs_log_item *lip,
int remove)
{
xfs_dquot_t *dqp = logitem->qli_dquot;
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
ASSERT(atomic_read(&dqp->q_pincount) > 0);
if (atomic_dec_and_test(&dqp->q_pincount))
wake_up(&dqp->q_pinwait);
}
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin_remove(
xfs_dq_logitem_t *logitem,
xfs_trans_t *tp)
{
xfs_qm_dquot_logitem_unpin(logitem);
}
/*
* Given the logitem, this writes the corresponding dquot entry to disk
* asynchronously. This is called with the dquot entry securely locked;
@ -133,12 +117,10 @@ xfs_qm_dquot_logitem_unpin_remove(
*/
STATIC void
xfs_qm_dquot_logitem_push(
xfs_dq_logitem_t *logitem)
struct xfs_log_item *lip)
{
xfs_dquot_t *dqp;
int error;
dqp = logitem->qli_dquot;
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
@ -160,27 +142,25 @@ xfs_qm_dquot_logitem_push(
xfs_dqunlock(dqp);
}
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
xfs_dq_logitem_t *l,
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
/*
* We always re-log the entire dquot when it becomes dirty,
* so the latest copy _is_ the only one that matters.
*/
return (lsn);
return lsn;
}
/*
* This is called to wait for the given dquot to be unpinned.
* Most of these pin/unpin routines are plagiarized from inode code.
*/
void
xfs_qm_dqunpin_wait(
xfs_dquot_t *dqp)
struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
if (atomic_read(&dqp->q_pincount) == 0)
@ -206,13 +186,12 @@ xfs_qm_dqunpin_wait(
*/
STATIC void
xfs_qm_dquot_logitem_pushbuf(
xfs_dq_logitem_t *qip)
struct xfs_log_item *lip)
{
xfs_dquot_t *dqp;
xfs_mount_t *mp;
xfs_buf_t *bp;
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_buf *bp;
dqp = qip->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
@ -220,22 +199,20 @@ xfs_qm_dquot_logitem_pushbuf(
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
if (completion_done(&dqp->q_flush) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
if (completion_done(&dqp->q_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp);
return;
}
mp = dqp->q_mount;
bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
return;
}
/*
@ -250,15 +227,14 @@ xfs_qm_dquot_logitem_pushbuf(
*/
STATIC uint
xfs_qm_dquot_logitem_trylock(
xfs_dq_logitem_t *qip)
struct xfs_log_item *lip)
{
xfs_dquot_t *dqp;
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
dqp = qip->qli_dquot;
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
if (! xfs_qm_dqlock_nowait(dqp))
if (!xfs_qm_dqlock_nowait(dqp))
return XFS_ITEM_LOCKED;
if (!xfs_dqflock_nowait(dqp)) {
@ -269,11 +245,10 @@ xfs_qm_dquot_logitem_trylock(
return XFS_ITEM_PUSHBUF;
}
ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
ASSERT(lip->li_flags & XFS_LI_IN_AIL);
return XFS_ITEM_SUCCESS;
}
/*
* Unlock the dquot associated with the log item.
* Clear the fields of the dquot and dquot log item that
@ -282,12 +257,10 @@ xfs_qm_dquot_logitem_trylock(
*/
STATIC void
xfs_qm_dquot_logitem_unlock(
xfs_dq_logitem_t *ql)
struct xfs_log_item *lip)
{
xfs_dquot_t *dqp;
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
ASSERT(ql != NULL);
dqp = ql->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
@ -304,43 +277,32 @@ xfs_qm_dquot_logitem_unlock(
xfs_dqunlock(dqp);
}
/*
* This needs to stamp an lsn into the dquot, I think.
* RPCs that look at user dquots would then have to
* push on the dependency recorded in the dquot.
*/
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_committing(
xfs_dq_logitem_t *l,
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
return;
}
/*
* This is the ops vector for dquots
*/
static struct xfs_item_ops xfs_dquot_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_dquot_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_qm_dquot_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))
xfs_qm_dquot_logitem_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_qm_dquot_logitem_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
.iop_pushbuf = (void(*)(xfs_log_item_t*))
xfs_qm_dquot_logitem_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_qm_dquot_logitem_committing
.iop_size = xfs_qm_dquot_logitem_size,
.iop_format = xfs_qm_dquot_logitem_format,
.iop_pin = xfs_qm_dquot_logitem_pin,
.iop_unpin = xfs_qm_dquot_logitem_unpin,
.iop_trylock = xfs_qm_dquot_logitem_trylock,
.iop_unlock = xfs_qm_dquot_logitem_unlock,
.iop_committed = xfs_qm_dquot_logitem_committed,
.iop_push = xfs_qm_dquot_logitem_push,
.iop_pushbuf = xfs_qm_dquot_logitem_pushbuf,
.iop_committing = xfs_qm_dquot_logitem_committing
};
/*
@ -350,10 +312,9 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
*/
void
xfs_qm_dquot_logitem_init(
struct xfs_dquot *dqp)
struct xfs_dquot *dqp)
{
xfs_dq_logitem_t *lp;
lp = &dqp->q_logitem;
struct xfs_dq_logitem *lp = &dqp->q_logitem;
xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
&xfs_dquot_item_ops);
@ -374,16 +335,22 @@ xfs_qm_dquot_logitem_init(
/*------------------ QUOTAOFF LOG ITEMS -------------------*/
static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_qoff_logitem, qql_item);
}
/*
* This returns the number of iovecs needed to log the given quotaoff item.
* We only need 1 iovec for a quotaoff item. It just logs the
* quotaoff_log_format structure.
*/
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
xfs_qm_qoff_logitem_size(
struct xfs_log_item *lip)
{
return (1);
return 1;
}
/*
@ -394,53 +361,46 @@ xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
* slots in the quotaoff item have been filled.
*/
STATIC void
xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
xfs_log_iovec_t *log_vector)
xfs_qm_qoff_logitem_format(
struct xfs_log_item *lip,
struct xfs_log_iovec *log_vector)
{
ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);
struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip);
log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
log_vector->i_addr = &qflip->qql_format;
log_vector->i_len = sizeof(xfs_qoff_logitem_t);
log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
qf->qql_format.qf_size = 1;
qflip->qql_format.qf_size = 1;
}
/*
* Pinning has no meaning for a quotaoff item, so just return.
*/
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
xfs_qm_qoff_logitem_pin(
struct xfs_log_item *lip)
{
return;
}
/*
* Since pinning has no meaning for a quotaoff item, unpinning does
* not either.
*/
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf)
xfs_qm_qoff_logitem_unpin(
struct xfs_log_item *lip,
int remove)
{
return;
}
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
{
return;
}
/*
* Quotaoff items have no locking, so just return success.
*/
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
xfs_qm_qoff_logitem_trylock(
struct xfs_log_item *lip)
{
return XFS_ITEM_LOCKED;
}
@ -449,53 +409,51 @@ xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
* Quotaoff items have no locking or pushing, so return failure
* so that the caller doesn't bother with us.
*/
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
xfs_qm_qoff_logitem_unlock(
struct xfs_log_item *lip)
{
return;
}
/*
* The quotaoff-start-item is logged only once and cannot be moved in the log,
* so simply return the lsn at which it's been logged.
*/
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
xfs_qm_qoff_logitem_committed(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
return (lsn);
return lsn;
}
/*
* There isn't much you can do to push on a quotaoff item. It is simply
* stuck waiting for the log to be flushed to disk.
*/
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
xfs_qm_qoff_logitem_push(
struct xfs_log_item *lip)
{
return;
}
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
xfs_qoff_logitem_t *qfe,
xfs_lsn_t lsn)
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
xfs_qoff_logitem_t *qfs;
struct xfs_ail *ailp;
struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
struct xfs_ail *ailp = qfs->qql_item.li_ailp;
qfs = qfe->qql_start_lip;
ailp = qfs->qql_item.li_ailp;
spin_lock(&ailp->xa_lock);
/*
* Delete the qoff-start logitem from the AIL.
* xfs_trans_ail_delete() drops the AIL lock.
*/
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
kmem_free(qfs);
kmem_free(qfe);
return (xfs_lsn_t)-1;
@ -515,71 +473,52 @@ xfs_qm_qoffend_logitem_committed(
* (truly makes the quotaoff irrevocable). If we do something else,
* then maybe we don't need two.
*/
/* ARGSUSED */
STATIC void
xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
xfs_qm_qoff_logitem_committing(
struct xfs_log_item *lip,
xfs_lsn_t commit_lsn)
{
return;
}
/* ARGSUSED */
STATIC void
xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
return;
}
static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
xfs_qm_qoff_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_qm_qoffend_logitem_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
.iop_pushbuf = NULL,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_qm_qoffend_logitem_committing
.iop_size = xfs_qm_qoff_logitem_size,
.iop_format = xfs_qm_qoff_logitem_format,
.iop_pin = xfs_qm_qoff_logitem_pin,
.iop_unpin = xfs_qm_qoff_logitem_unpin,
.iop_trylock = xfs_qm_qoff_logitem_trylock,
.iop_unlock = xfs_qm_qoff_logitem_unlock,
.iop_committed = xfs_qm_qoffend_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push,
.iop_committing = xfs_qm_qoff_logitem_committing
};
/*
* This is the ops vector shared by all quotaoff-start log items.
*/
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
xfs_qm_qoff_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_qm_qoff_logitem_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
.iop_pushbuf = NULL,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_qm_qoff_logitem_committing
.iop_size = xfs_qm_qoff_logitem_size,
.iop_format = xfs_qm_qoff_logitem_format,
.iop_pin = xfs_qm_qoff_logitem_pin,
.iop_unpin = xfs_qm_qoff_logitem_unpin,
.iop_trylock = xfs_qm_qoff_logitem_trylock,
.iop_unlock = xfs_qm_qoff_logitem_unlock,
.iop_committed = xfs_qm_qoff_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push,
.iop_committing = xfs_qm_qoff_logitem_committing
};
/*
* Allocate and initialize a quotaoff item of the correct quota type(s).
*/
xfs_qoff_logitem_t *
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
struct xfs_mount *mp,
xfs_qoff_logitem_t *start,
uint flags)
struct xfs_mount *mp,
struct xfs_qoff_logitem *start,
uint flags)
{
xfs_qoff_logitem_t *qf;
struct xfs_qoff_logitem *qf;
qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
@ -587,5 +526,5 @@ xfs_qm_qoff_logitem_init(
qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
qf->qql_format.qf_flags = flags;
qf->qql_start_lip = start;
return (qf);
return qf;
}
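
The dropped cast on kmem_zalloc() above is the classic C cleanup: void * converts to any object pointer implicitly, so the cast was pure noise. A userspace analogue (calloc standing in for kmem_zalloc; the names are illustrative):

#include <stdlib.h>

struct qoff_logitem { unsigned int qf_flags; };

static struct qoff_logitem *qoff_alloc(unsigned int flags)
{
	/* No cast needed: void * converts implicitly. */
	struct qoff_logitem *qf = calloc(1, sizeof(*qf));

	if (qf)
		qf->qf_flags = flags;
	return qf;
}

int main(void)
{
	struct qoff_logitem *qf = qoff_alloc(0x1);

	free(qf);
	return 0;
}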

@ -23,25 +23,18 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
@ -1497,7 +1490,7 @@ xfs_qm_dqiterate(
maxlblkcnt - lblkno,
XFS_BMAPI_METADATA,
NULL,
0, map, &nmaps, NULL, NULL);
0, map, &nmaps, NULL);
xfs_iunlock(qip, XFS_ILOCK_SHARED);
if (error)
break;
@ -1669,7 +1662,8 @@ xfs_qm_dqusage_adjust(
* making us disable quotas for the file system.
*/
if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
xfs_iput(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
@ -1682,7 +1676,8 @@ xfs_qm_dqusage_adjust(
* Walk thru the extent list and count the realtime blocks.
*/
if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
xfs_iput(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
if (udqp)
xfs_qm_dqput(udqp);
if (gdqp)

@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"

@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"

@ -26,25 +26,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
@ -248,40 +238,74 @@ out_unlock:
return error;
}
STATIC int
xfs_qm_scall_trunc_qfile(
struct xfs_mount *mp,
xfs_ino_t ino)
{
struct xfs_inode *ip;
struct xfs_trans *tp;
int error;
if (ino == NULLFSINO)
return 0;
error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
if (error)
return error;
xfs_ilock(ip, XFS_IOLOCK_EXCL);
tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
XFS_ITRUNCATE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
goto out_put;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip);
error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
if (error) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
goto out_unlock;
}
xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
IRELE(ip);
return error;
}
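
xfs_qm_scall_trunc_qfile() above follows the kernel's goto-unwind convention: each failure exits through a label that undoes exactly what had been set up by that point. A minimal generic sketch of the shape (plain C, not the XFS API):

#include <stdlib.h>

static int do_work(void)
{
	char *a, *b;
	int error;

	a = malloc(16);
	if (!a)
		return -1;

	b = malloc(16);
	if (!b) {
		error = -1;
		goto out_free_a;
	}

	/* ... real work using a and b ... */
	error = 0;

	free(b);
out_free_a:
	free(a);
	return error;
}

int main(void)
{
	return do_work();
}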
int
xfs_qm_scall_trunc_qfiles(
xfs_mount_t *mp,
uint flags)
{
int error = 0, error2 = 0;
xfs_inode_t *qip;
if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
return XFS_ERROR(EINVAL);
}
if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip);
if (!error) {
error = xfs_truncate_file(mp, qip);
IRELE(qip);
}
}
if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip);
if (!error2) {
error2 = xfs_truncate_file(mp, qip);
IRELE(qip);
}
}
if (flags & XFS_DQ_USER)
error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
return error ? error : error2;
}
/*
* Switch on (a given) quota enforcement for a filesystem. This takes
* effect immediately.
@ -875,8 +899,9 @@ xfs_dqrele_inode(
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
xfs_iput(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
return 0;
}
@ -1143,7 +1168,8 @@ xfs_qm_internalqcheck_adjust(
* of those now.
*/
if (! ipreleased) {
xfs_iput(ip, lock_flags);
xfs_iunlock(ip, lock_flags);
IRELE(ip);
ipreleased = B_TRUE;
goto again;
}
@ -1160,7 +1186,8 @@ xfs_qm_internalqcheck_adjust(
ASSERT(gd);
xfs_qm_internalqcheck_dqadjust(ip, gd);
}
xfs_iput(ip, lock_flags);
xfs_iunlock(ip, lock_flags);
IRELE(ip);
*res = BULKSTAT_RV_DIDONE;
return (0);
}

@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
@ -59,16 +49,14 @@ xfs_trans_dqjoin(
xfs_trans_t *tp,
xfs_dquot_t *dqp)
{
xfs_dq_logitem_t *lp = &dqp->q_logitem;
ASSERT(dqp->q_transp != tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(lp->qli_dquot == dqp);
ASSERT(dqp->q_logitem.qli_dquot == dqp);
/*
* Get a log_item_desc to point at the new item.
*/
(void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));
xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
/*
* Initialize i_transp so we can later determine if this dquot is
@ -93,16 +81,11 @@ xfs_trans_log_dquot(
xfs_trans_t *tp,
xfs_dquot_t *dqp)
{
xfs_log_item_desc_t *lidp;
ASSERT(dqp->q_transp == tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
ASSERT(lidp != NULL);
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
/*
@ -874,9 +857,8 @@ xfs_trans_get_qoff_item(
/*
* Get a log_item_desc to point at the new item.
*/
(void) xfs_trans_add_item(tp, (xfs_log_item_t*)q);
return (q);
xfs_trans_add_item(tp, &q->qql_item);
return q;
}
@ -890,13 +872,8 @@ xfs_trans_log_quotaoff_item(
xfs_trans_t *tp,
xfs_qoff_logitem_t *qlp)
{
xfs_log_item_desc_t *lidp;
lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
ASSERT(lidp != NULL);
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
STATIC void

@ -22,7 +22,6 @@
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"

@ -24,18 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@ -688,8 +683,6 @@ xfs_alloc_ag_vextent_near(
xfs_agblock_t ltbno; /* start bno of left side entry */
xfs_agblock_t ltbnoa; /* aligned ... */
xfs_extlen_t ltdiff; /* difference to left side entry */
/*REFERENCED*/
xfs_agblock_t ltend; /* end bno of left side entry */
xfs_extlen_t ltlen; /* length of left side entry */
xfs_extlen_t ltlena; /* aligned ... */
xfs_agblock_t ltnew; /* useful start bno of left side */
@ -814,8 +807,7 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
ltend = ltbno + ltlen;
ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@ -828,7 +820,7 @@ xfs_alloc_ag_vextent_near(
*/
args->agbno = bnew;
ASSERT(bnew >= ltbno);
ASSERT(bnew + blen <= ltend);
ASSERT(bnew + blen <= ltbno + ltlen);
/*
* Set up a cursor for the by-bno tree.
*/
@ -1157,7 +1149,6 @@ xfs_alloc_ag_vextent_near(
/*
* Fix up the length and compute the useful address.
*/
ltend = ltbno + ltlen;
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args)) {
@ -1170,7 +1161,7 @@ xfs_alloc_ag_vextent_near(
(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
ltlen, &ltnew);
ASSERT(ltnew >= ltbno);
ASSERT(ltnew + rlen <= ltend);
ASSERT(ltnew + rlen <= ltbno + ltlen);
ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->agbno = ltnew;
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,

@ -27,16 +27,16 @@ struct xfs_busy_extent;
/*
* Freespace allocation types. Argument to xfs_alloc_[v]extent.
*/
typedef enum xfs_alloctype
{
XFS_ALLOCTYPE_ANY_AG, /* allocate anywhere, use rotor */
XFS_ALLOCTYPE_FIRST_AG, /* ... start at ag 0 */
XFS_ALLOCTYPE_START_AG, /* anywhere, start in this a.g. */
XFS_ALLOCTYPE_THIS_AG, /* anywhere in this a.g. */
XFS_ALLOCTYPE_START_BNO, /* near this block else anywhere */
XFS_ALLOCTYPE_NEAR_BNO, /* in this a.g. and near this block */
XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
} xfs_alloctype_t;
#define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */
#define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */
#define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */
#define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */
#define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */
#define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */
#define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */
/* this should become an enum again when the tracing code is fixed */
typedef unsigned int xfs_alloctype_t;
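
The comment above suggests the enum was flattened to #defines because the XFS_ALLOC_TYPES name table just below feeds the tracing code, whose format strings can only see preprocessor-visible constants; that reading of the limitation is an assumption. A compilable sketch of such a value-to-name table:

#include <stdio.h>

#define ALLOCTYPE_ANY_AG	0x01
#define ALLOCTYPE_THIS_AG	0x08
#define ALLOCTYPE_NEAR_BNO	0x20

static const struct { unsigned int val; const char *name; } alloc_types[] = {
	{ ALLOCTYPE_ANY_AG,	"ANY_AG"   },
	{ ALLOCTYPE_THIS_AG,	"THIS_AG"  },
	{ ALLOCTYPE_NEAR_BNO,	"NEAR_BNO" },
};

static const char *alloctype_name(unsigned int t)
{
	size_t i;

	for (i = 0; i < sizeof(alloc_types) / sizeof(alloc_types[0]); i++)
		if (alloc_types[i].val == t)
			return alloc_types[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", alloctype_name(ALLOCTYPE_NEAR_BNO));	/* NEAR_BNO */
	return 0;
}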
#define XFS_ALLOC_TYPES \
{ XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \

View File

@ -24,19 +24,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"

@ -25,19 +25,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
@ -325,8 +319,7 @@ xfs_attr_set_int(
return (error);
}
xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args.trans, dp);
xfs_trans_ijoin(args.trans, dp);
/*
* If the attribute list is non-existent or a shortform list,
@ -396,10 +389,8 @@ xfs_attr_set_int(
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args.trans, dp);
}
if (committed)
xfs_trans_ijoin(args.trans, dp);
/*
* Commit the leaf transformation. We'll need another (linked)
@ -544,8 +535,7 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
* No need to make quota reservations here. We expect to release some
* blocks not allocate in the common case.
*/
xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args.trans, dp);
xfs_trans_ijoin(args.trans, dp);
/*
* Decide on what work routines to call based on the inode size.
@ -821,8 +811,7 @@ xfs_attr_inactive(xfs_inode_t *dp)
* No need to make quota reservations here. We expect to release some
* blocks, not allocate, in the common case.
*/
xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(trans, dp);
xfs_trans_ijoin(trans, dp);
/*
* Decide on what work routines to call based on the inode size.
@ -981,10 +970,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
/*
* Commit the current trans (including the inode) and start
@ -1085,10 +1072,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* and started a new one. We need the inode to be
* in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
} else
xfs_da_buf_done(bp);
@ -1161,10 +1146,8 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
} else
xfs_da_buf_done(bp);
return(0);
@ -1317,10 +1300,8 @@ restart:
* and started a new one. We need the inode to be
* in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
/*
* Commit the node conversion and start the next
@ -1356,10 +1337,8 @@ restart:
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
} else {
/*
* Addition succeeded, update Btree hashvals.
@ -1470,10 +1449,8 @@ restart:
* and started a new one. We need the inode to be
* in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
}
/*
@ -1604,10 +1581,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
/*
* Commit the Btree join operation and start a new trans.
@ -1658,10 +1633,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* and started a new one. We need the inode to be
* in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
} else
xfs_da_brelse(args->trans, bp);
}
@ -2004,7 +1977,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
NULL, 0, map, &nmap, NULL, NULL);
NULL, 0, map, &nmap, NULL);
if (error)
return(error);
ASSERT(nmap >= 1);
@ -2083,7 +2056,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
XFS_BMAPI_WRITE,
args->firstblock, args->total, &map, &nmap,
args->flist, NULL);
args->flist);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@ -2099,10 +2072,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, dp);
}
if (committed)
xfs_trans_ijoin(args->trans, dp);
ASSERT(nmap == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@ -2136,7 +2107,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
args->firstblock, 0, &map, &nmap,
NULL, NULL);
NULL);
if (error) {
return(error);
}
@ -2201,7 +2172,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
args->firstblock, 0, &map, &nmap,
args->flist, NULL);
args->flist);
if (error) {
return(error);
}
@ -2239,7 +2210,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
1, args->firstblock, args->flist,
NULL, &done);
&done);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@ -2255,10 +2226,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed) {
xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args->trans, args->dp);
}
if (committed)
xfs_trans_ijoin(args->trans, args->dp);
/*
* Close out trans and start the next one in the chain.

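The hunks above all make the same substitution, so a compilable toy makes the API shape explicit. Everything below is a stand-in sketch (struct trans, struct inode, and the trans_* helpers are invented for illustration, not the kernel semantics in detail); it only mirrors the visible change from the two-call xfs_trans_ijoin() + xfs_trans_ihold() idiom to a single xfs_trans_ijoin() with no lock-flags argument.

#include <stdio.h>

/* Hypothetical stand-ins for the transaction and inode types. */
struct trans { int joined; int held; };
struct inode { int dummy; };

/* Old API shape: join took a lock-flags argument, hold was a second call. */
static void trans_ijoin_old(struct trans *tp, struct inode *ip, int lock_flags)
{
	(void)ip; (void)lock_flags;
	tp->joined = 1;
}

static void trans_ihold_old(struct trans *tp, struct inode *ip)
{
	(void)ip;
	tp->held = 1;
}

/* New API shape: one call, no lock-flags argument; the hold is implicit. */
static void trans_ijoin_new(struct trans *tp, struct inode *ip)
{
	(void)ip;
	tp->joined = tp->held = 1;
}

int main(void)
{
	struct trans t1 = { 0, 0 }, t2 = { 0, 0 };
	struct inode ip = { 0 };

	trans_ijoin_old(&t1, &ip, /* XFS_ILOCK_EXCL */ 1);
	trans_ihold_old(&t1, &ip);

	trans_ijoin_new(&t2, &ip);	/* same end state, one call */

	printf("old: joined=%d held=%d  new: joined=%d held=%d\n",
	       t1.joined, t1.held, t2.joined, t2.held);
	return 0;
}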
View File

@ -24,8 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
@ -33,7 +31,6 @@
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@ -2931,7 +2928,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
nmap = 1;
error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
NULL, 0, &map, &nmap, NULL, NULL);
NULL, 0, &map, &nmap, NULL);
if (error) {
return(error);
}

View File

@ -30,13 +30,10 @@
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
@ -104,7 +101,6 @@ xfs_bmap_add_extent(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd); /* OK to allocate reserved blocks */
@ -122,7 +118,6 @@ xfs_bmap_add_extent_delay_real(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd); /* OK to allocate reserved blocks */
/*
@ -135,7 +130,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp,/* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd); /* OK to allocate reserved blocks */
/*
@ -149,7 +143,6 @@ xfs_bmap_add_extent_hole_real(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork); /* data or attr fork */
/*
@ -162,8 +155,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta); /* Change made to incore extents */
int *logflagsp); /* inode logging flags */
/*
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
@ -200,7 +192,6 @@ xfs_bmap_del_extent(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp,/* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd); /* OK to allocate reserved blocks */
@ -489,7 +480,6 @@ xfs_bmap_add_extent(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd) /* OK to use reserved data blocks */
{
@ -524,15 +514,6 @@ xfs_bmap_add_extent(
logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else
logflags = 0;
/* DELTA: single new extent */
if (delta) {
if (delta->xed_startoff > new->br_startoff)
delta->xed_startoff = new->br_startoff;
if (delta->xed_blockcount <
new->br_startoff + new->br_blockcount)
delta->xed_blockcount = new->br_startoff +
new->br_blockcount;
}
}
/*
* Any kind of new delayed allocation goes here.
@ -542,7 +523,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
&logflags, delta, rsvd)))
&logflags, rsvd)))
goto done;
}
/*
@ -553,7 +534,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
&logflags, delta, whichfork)))
&logflags, whichfork)))
goto done;
} else {
xfs_bmbt_irec_t prev; /* old extent at offset idx */
@ -578,17 +559,17 @@ xfs_bmap_add_extent(
XFS_BTCUR_BPRV_WASDEL);
if ((error = xfs_bmap_add_extent_delay_real(ip,
idx, &cur, new, &da_new, first, flist,
&logflags, delta, rsvd)))
&logflags, rsvd)))
goto done;
} else if (new->br_state == XFS_EXT_NORM) {
ASSERT(new->br_state == XFS_EXT_NORM);
if ((error = xfs_bmap_add_extent_unwritten_real(
ip, idx, &cur, new, &logflags, delta)))
ip, idx, &cur, new, &logflags)))
goto done;
} else {
ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
if ((error = xfs_bmap_add_extent_unwritten_real(
ip, idx, &cur, new, &logflags, delta)))
ip, idx, &cur, new, &logflags)))
goto done;
}
ASSERT(*curp == cur || *curp == NULL);
@ -601,7 +582,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
new, &logflags, delta, whichfork)))
new, &logflags, whichfork)))
goto done;
}
}
@ -666,7 +647,6 @@ xfs_bmap_add_extent_delay_real(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd) /* OK to use reserved data block allocation */
{
xfs_btree_cur_t *cur; /* btree cursor */
@ -797,11 +777,6 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
/* DELTA: Three in-core extents are replaced by one. */
temp = LEFT.br_startoff;
temp2 = LEFT.br_blockcount +
PREV.br_blockcount +
RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@ -832,10 +807,6 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
/* DELTA: Two in-core extents are replaced by one. */
temp = LEFT.br_startoff;
temp2 = LEFT.br_blockcount +
PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@ -867,10 +838,6 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
/* DELTA: Two in-core extents are replaced by one. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount +
RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@ -900,9 +867,6 @@ xfs_bmap_add_extent_delay_real(
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
*dnew = 0;
/* DELTA: The in-core extent described by new changed type. */
temp = new->br_startoff;
temp2 = new->br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@ -942,10 +906,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
temp = LEFT.br_startoff;
temp2 = LEFT.br_blockcount +
PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING:
@ -990,9 +950,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@ -1031,10 +988,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount +
RIGHT.br_blockcount;
break;
case BMAP_RIGHT_FILLING:
@ -1078,9 +1031,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
case 0:
@ -1161,9 +1111,6 @@ xfs_bmap_add_extent_delay_real(
nullstartblock((int)temp2));
trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
*dnew = temp + temp2;
/* DELTA: One in-core extent is split in three. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@ -1179,13 +1126,6 @@ xfs_bmap_add_extent_delay_real(
ASSERT(0);
}
*curp = cur;
if (delta) {
temp2 += temp;
if (delta->xed_startoff > temp)
delta->xed_startoff = temp;
if (delta->xed_blockcount < temp2)
delta->xed_blockcount = temp2;
}
done:
*logflagsp = rval;
return error;
@ -1204,8 +1144,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta) /* Change made to incore extents */
int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
@ -1219,8 +1158,6 @@ xfs_bmap_add_extent_unwritten_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
xfs_filblks_t temp=0;
xfs_filblks_t temp2=0;
#define LEFT r[0]
#define RIGHT r[1]
@ -1341,11 +1278,6 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount, LEFT.br_state)))
goto done;
}
/* DELTA: Three in-core extents are replaced by one. */
temp = LEFT.br_startoff;
temp2 = LEFT.br_blockcount +
PREV.br_blockcount +
RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@ -1382,10 +1314,6 @@ xfs_bmap_add_extent_unwritten_real(
LEFT.br_state)))
goto done;
}
/* DELTA: Two in-core extents are replaced by one. */
temp = LEFT.br_startoff;
temp2 = LEFT.br_blockcount +
PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@ -1422,10 +1350,6 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
/* DELTA: Two in-core extents are replaced by one. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount +
RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@ -1453,9 +1377,6 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
/* DELTA: The in-core extent described by new changed type. */
temp = new->br_startoff;
temp2 = new->br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@ -1501,10 +1422,6 @@ xfs_bmap_add_extent_unwritten_real(
LEFT.br_state))
goto done;
}
/* DELTA: The boundary between two in-core extents moved. */
temp = LEFT.br_startoff;
temp2 = LEFT.br_blockcount +
PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING:
@ -1544,9 +1461,6 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@ -1587,10 +1501,6 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
/* DELTA: The boundary between two in-core extents moved. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount +
RIGHT.br_blockcount;
break;
case BMAP_RIGHT_FILLING:
@ -1630,9 +1540,6 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
case 0:
@ -1692,9 +1599,6 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
/* DELTA: One in-core extent is split in three. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@ -1710,13 +1614,6 @@ xfs_bmap_add_extent_unwritten_real(
ASSERT(0);
}
*curp = cur;
if (delta) {
temp2 += temp;
if (delta->xed_startoff > temp)
delta->xed_startoff = temp;
if (delta->xed_blockcount < temp2)
delta->xed_blockcount = temp2;
}
done:
*logflagsp = rval;
return error;
@ -1736,7 +1633,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd) /* OK to allocate reserved blocks */
{
xfs_bmbt_rec_host_t *ep; /* extent record for idx */
@ -1747,7 +1643,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0; /* temp for indirect calculations */
xfs_filblks_t temp2=0;
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
ep = xfs_iext_get_ext(ifp, idx);
@ -1819,9 +1714,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_iext_remove(ip, idx, 1, state);
ip->i_df.if_lastex = idx - 1;
/* DELTA: Two in-core extents were replaced by one. */
temp2 = temp;
temp = left.br_startoff;
break;
case BMAP_LEFT_CONTIG:
@ -1841,9 +1733,6 @@ xfs_bmap_add_extent_hole_delay(
trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
ip->i_df.if_lastex = idx - 1;
/* DELTA: One in-core extent grew into a hole. */
temp2 = temp;
temp = left.br_startoff;
break;
case BMAP_RIGHT_CONTIG:
@ -1862,9 +1751,6 @@ xfs_bmap_add_extent_hole_delay(
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
ip->i_df.if_lastex = idx;
/* DELTA: One in-core extent grew into a hole. */
temp2 = temp;
temp = new->br_startoff;
break;
case 0:
@ -1876,9 +1762,6 @@ xfs_bmap_add_extent_hole_delay(
oldlen = newlen = 0;
xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
/* DELTA: A new in-core extent was added in a hole. */
temp2 = new->br_blockcount;
temp = new->br_startoff;
break;
}
if (oldlen != newlen) {
@ -1889,13 +1772,6 @@ xfs_bmap_add_extent_hole_delay(
* Nothing to do for disk quota accounting here.
*/
}
if (delta) {
temp2 += temp;
if (delta->xed_startoff > temp)
delta->xed_startoff = temp;
if (delta->xed_blockcount < temp2)
delta->xed_blockcount = temp2;
}
*logflagsp = 0;
return 0;
}
@ -1911,7 +1787,6 @@ xfs_bmap_add_extent_hole_real(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork) /* data or attr fork */
{
xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */
@ -1922,8 +1797,6 @@ xfs_bmap_add_extent_hole_real(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0;
xfs_filblks_t temp2=0;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
@ -2020,11 +1893,6 @@ xfs_bmap_add_extent_hole_real(
left.br_state)))
goto done;
}
/* DELTA: Two in-core extents were replaced by one. */
temp = left.br_startoff;
temp2 = left.br_blockcount +
new->br_blockcount +
right.br_blockcount;
break;
case BMAP_LEFT_CONTIG:
@ -2056,10 +1924,6 @@ xfs_bmap_add_extent_hole_real(
left.br_state)))
goto done;
}
/* DELTA: One in-core extent grew. */
temp = left.br_startoff;
temp2 = left.br_blockcount +
new->br_blockcount;
break;
case BMAP_RIGHT_CONTIG:
@ -2092,10 +1956,6 @@ xfs_bmap_add_extent_hole_real(
right.br_state)))
goto done;
}
/* DELTA: One in-core extent grew. */
temp = new->br_startoff;
temp2 = new->br_blockcount +
right.br_blockcount;
break;
case 0:
@ -2123,18 +1983,8 @@ xfs_bmap_add_extent_hole_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
/* DELTA: A new extent was added in a hole. */
temp = new->br_startoff;
temp2 = new->br_blockcount;
break;
}
if (delta) {
temp2 += temp;
if (delta->xed_startoff > temp)
delta->xed_startoff = temp;
if (delta->xed_blockcount < temp2)
delta->xed_blockcount = temp2;
}
done:
*logflagsp = rval;
return error;
@ -2959,7 +2809,6 @@ xfs_bmap_del_extent(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd) /* OK to allocate reserved blocks */
{
@ -3265,14 +3114,6 @@ xfs_bmap_del_extent(
if (da_old > da_new)
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
rsvd);
if (delta) {
/* DELTA: report the original extent. */
if (delta->xed_startoff > got.br_startoff)
delta->xed_startoff = got.br_startoff;
if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
delta->xed_blockcount = got.br_startoff +
got.br_blockcount;
}
done:
*logflagsp = flags;
return error;
@ -3754,9 +3595,10 @@ xfs_bmap_add_attrfork(
ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
}
ASSERT(ip->i_d.di_anextents == 0);
IHOLD(ip);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_DEV:
ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
@ -4483,8 +4325,7 @@ xfs_bmapi(
xfs_extlen_t total, /* total blocks needed */
xfs_bmbt_irec_t *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
xfs_extdelta_t *delta) /* o: change made to incore extents */
xfs_bmap_free_t *flist) /* i/o: list extents to free */
{
xfs_fsblock_t abno; /* allocated block number */
xfs_extlen_t alen; /* allocated extent length */
@ -4596,10 +4437,7 @@ xfs_bmapi(
end = bno + len;
obno = bno;
bma.ip = NULL;
if (delta) {
delta->xed_startoff = NULLFILEOFF;
delta->xed_blockcount = 0;
}
while (bno < end && n < *nmap) {
/*
* Reading past eof, act as though there's a hole
@ -4620,19 +4458,13 @@ xfs_bmapi(
* allocate the stuff asked for in this bmap call
* but that wouldn't be as good.
*/
if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
if (wasdelay) {
alen = (xfs_extlen_t)got.br_blockcount;
aoff = got.br_startoff;
if (lastx != NULLEXTNUM && lastx) {
ep = xfs_iext_get_ext(ifp, lastx - 1);
xfs_bmbt_get_all(ep, &prev);
}
} else if (wasdelay) {
alen = (xfs_extlen_t)
XFS_FILBLKS_MIN(len,
(got.br_startoff +
got.br_blockcount) - bno);
aoff = bno;
} else {
alen = (xfs_extlen_t)
XFS_FILBLKS_MIN(len, MAXEXTLEN);
@ -4831,7 +4663,7 @@ xfs_bmapi(
got.br_state = XFS_EXT_UNWRITTEN;
}
error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
firstblock, flist, &tmp_logflags, delta,
firstblock, flist, &tmp_logflags,
whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
logflags |= tmp_logflags;
if (error)
@ -4927,7 +4759,7 @@ xfs_bmapi(
}
mval->br_state = XFS_EXT_NORM;
error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
firstblock, flist, &tmp_logflags, delta,
firstblock, flist, &tmp_logflags,
whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
logflags |= tmp_logflags;
if (error)
@ -5017,14 +4849,6 @@ xfs_bmapi(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
error = 0;
if (delta && delta->xed_startoff != NULLFILEOFF) {
/* A change was actually made.
* Note that delta->xed_blockcount is an offset at this
* point and needs to be converted to a block count.
*/
ASSERT(delta->xed_blockcount > delta->xed_startoff);
delta->xed_blockcount -= delta->xed_startoff;
}
error0:
/*
* Log everything. Do this after conversion, there's no point in
@ -5136,8 +4960,6 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
xfs_extdelta_t *delta, /* o: change made to incore
extents */
int *done) /* set if not done yet */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@ -5196,10 +5018,7 @@ xfs_bunmapi(
bno = start + len - 1;
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
&prev);
if (delta) {
delta->xed_startoff = NULLFILEOFF;
delta->xed_blockcount = 0;
}
/*
* Check to see if the given block number is past the end of the
* file, back up to the last block if so...
@ -5297,7 +5116,7 @@ xfs_bunmapi(
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
firstblock, flist, &logflags, delta,
firstblock, flist, &logflags,
XFS_DATA_FORK, 0);
if (error)
goto error0;
@ -5352,7 +5171,7 @@ xfs_bunmapi(
prev.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
&prev, firstblock, flist, &logflags,
delta, XFS_DATA_FORK, 0);
XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@ -5361,7 +5180,7 @@ xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur,
&del, firstblock, flist, &logflags,
delta, XFS_DATA_FORK, 0);
XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@ -5414,7 +5233,7 @@ xfs_bunmapi(
goto error0;
}
error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
&tmp_logflags, delta, whichfork, rsvd);
&tmp_logflags, whichfork, rsvd);
logflags |= tmp_logflags;
if (error)
goto error0;
@ -5471,14 +5290,6 @@ nodelete:
ASSERT(ifp->if_ext_max ==
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
error = 0;
if (delta && delta->xed_startoff != NULLFILEOFF) {
/* A change was actually made.
* Note that delta->xed_blockcount is an offset at this
* point and needs to be converted to a block count.
*/
ASSERT(delta->xed_blockcount > delta->xed_startoff);
delta->xed_blockcount -= delta->xed_startoff;
}
error0:
/*
* Log everything. Do this after conversion, there's no point in
@ -5605,28 +5416,6 @@ xfs_getbmap(
prealloced = 0;
fixlen = 1LL << 32;
} else {
/*
* If the BMV_IF_NO_DMAPI_READ interface bit specified, do
* not generate a DMAPI read event. Otherwise, if the
* DM_EVENT_READ bit is set for the file, generate a read
* event in order that the DMAPI application may do its thing
* before we return the extents. Usually this means restoring
* user file data to regions of the file that look like holes.
*
* The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
* BMV_IF_NO_DMAPI_READ so that read events are generated.
* If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
* could misinterpret holes in a DMAPI file as true holes,
* when in fact they may represent offline user data.
*/
if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
!(iflags & BMV_IF_NO_DMAPI_READ)) {
error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
0, 0, 0, NULL);
if (error)
return XFS_ERROR(error);
}
if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
@ -5713,7 +5502,7 @@ xfs_getbmap(
error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
XFS_BB_TO_FSB(mp, bmv->bmv_length),
bmapi_flags, NULL, 0, map, &nmap,
NULL, NULL);
NULL);
if (error)
goto out_free_map;
ASSERT(nmap <= subnex);
@ -5859,66 +5648,34 @@ xfs_bmap_eof(
}
#ifdef DEBUG
STATIC
xfs_buf_t *
STATIC struct xfs_buf *
xfs_bmap_get_bp(
xfs_btree_cur_t *cur,
struct xfs_btree_cur *cur,
xfs_fsblock_t bno)
{
int i;
xfs_buf_t *bp;
struct xfs_log_item_desc *lidp;
int i;
if (!cur)
return(NULL);
return NULL;
bp = NULL;
for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
bp = cur->bc_bufs[i];
if (!bp) break;
if (XFS_BUF_ADDR(bp) == bno)
break; /* Found it */
for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
if (!cur->bc_bufs[i])
break;
if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
return cur->bc_bufs[i];
}
if (i == XFS_BTREE_MAXLEVELS)
bp = NULL;
if (!bp) { /* Chase down all the log items to see if the bp is there */
xfs_log_item_chunk_t *licp;
xfs_trans_t *tp;
tp = cur->bc_tp;
licp = &tp->t_items;
while (!bp && licp != NULL) {
if (xfs_lic_are_all_free(licp)) {
licp = licp->lic_next;
continue;
}
for (i = 0; i < licp->lic_unused; i++) {
xfs_log_item_desc_t *lidp;
xfs_log_item_t *lip;
xfs_buf_log_item_t *bip;
xfs_buf_t *lbp;
if (xfs_lic_isfree(licp, i)) {
continue;
}
lidp = xfs_lic_slot(licp, i);
lip = lidp->lid_item;
if (lip->li_type != XFS_LI_BUF)
continue;
bip = (xfs_buf_log_item_t *)lip;
lbp = bip->bli_buf;
if (XFS_BUF_ADDR(lbp) == bno) {
bp = lbp;
break; /* Found it */
}
}
licp = licp->lic_next;
}
/* Chase down all the log items to see if the bp is there */
list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
struct xfs_buf_log_item *bip;
bip = (struct xfs_buf_log_item *)lidp->lid_item;
if (bip->bli_item.li_type == XFS_LI_BUF &&
XFS_BUF_ADDR(bip->bli_buf) == bno)
return bip->bli_buf;
}
return(bp);
return NULL;
}
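The rewritten lookup above drops the hand-rolled log item chunk walk in favor of list_for_each_entry() plus an embedded list node. A minimal userspace analog of that idiom, assuming nothing beyond standard C: the container_of macro here is the usual offsetof trick, and the singly linked list is a toy stand-in for the kernel's list head.

#include <stddef.h>
#include <stdio.h>

/* Recover the containing structure from a pointer to an embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct item {
	int payload;
	struct list_node link;	/* embedded node, like lid_trans above */
};

int main(void)
{
	struct item a = { 1, { NULL } };
	struct item b = { 2, { NULL } };
	a.link.next = &b.link;

	for (struct list_node *n = &a.link; n != NULL; n = n->next) {
		struct item *it = container_of(n, struct item, link);
		printf("payload=%d\n", it->payload);
	}
	return 0;
}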
STATIC void

View File

@ -27,20 +27,6 @@ struct xfs_trans;
extern kmem_zone_t *xfs_bmap_free_item_zone;
/*
* DELTA: describe a change to the in-core extent list.
*
* Internally the use of xed_blockcount is somewhat funky.
* xed_blockcount contains an offset much of the time because this
* makes merging changes easier. (xfs_fileoff_t and xfs_filblks_t are
* the same underlying type).
*/
typedef struct xfs_extdelta
{
xfs_fileoff_t xed_startoff; /* offset of range */
xfs_filblks_t xed_blockcount; /* blocks in range */
} xfs_extdelta_t;
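The "funky" usage the comment warns about is that xed_blockcount holds an end offset while ranges are merged, and only becomes a true block count at the very end (the callers above do delta->xed_blockcount -= delta->xed_startoff). A standalone sketch of that bookkeeping, with names loosely borrowed from the typedef:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t xfs_fileoff_t;
typedef uint64_t xfs_filblks_t;

struct extdelta {
	xfs_fileoff_t startoff;		/* lowest start offset seen */
	xfs_filblks_t blockcount;	/* holds an END offset while merging */
};

static void merge(struct extdelta *d, xfs_fileoff_t start, xfs_filblks_t count)
{
	if (d->startoff > start)
		d->startoff = start;
	if (d->blockcount < start + count)
		d->blockcount = start + count;	/* still an end offset here */
}

int main(void)
{
	/* Callers initialize startoff to NULLFILEOFF (all ones) and count to 0. */
	struct extdelta d = { (xfs_fileoff_t)-1, 0 };

	merge(&d, 10, 5);
	merge(&d, 3, 4);
	d.blockcount -= d.startoff;	/* final conversion: end - start = count */

	printf("start=%llu count=%llu\n",
	       (unsigned long long)d.startoff,
	       (unsigned long long)d.blockcount);	/* start=3 count=12 */
	return 0;
}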
/*
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
@ -82,16 +68,13 @@ typedef struct xfs_bmap_free
#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */
#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */
#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */
#define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */
#define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */
#define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */
#define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */
#define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */
#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
#define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */
#define XFS_BMAPI_RSVBLOCKS 0x020 /* OK to alloc. reserved data blocks */
#define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */
#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */
/* combine contig. space */
#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */
/* XFS_BMAPI_DIRECT_IO 0x800 */
#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
#define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */
#define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */
/* need write cache flushing and no */
/* additional allocation alignments */
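With XFS_BMAPI_EXACT, XFS_BMAPI_ASYNC, and the reserved DIRECT_IO bit gone, the remaining flags are renumbered into a dense low-bit space; they are still combined and tested as a plain bitmask. A small illustration (the XFS_BMAPI_WRITE value is an assumption, since its define sits outside the hunk shown):

#include <stdio.h>

#define XFS_BMAPI_WRITE		0x001	/* assumed; not visible in this hunk */
#define XFS_BMAPI_METADATA	0x008	/* values copied from the header above */
#define XFS_BMAPI_ATTRFORK	0x010
#define XFS_BMAPI_CONTIG	0x100

int main(void)
{
	int flags = XFS_BMAPI_WRITE | XFS_BMAPI_METADATA | XFS_BMAPI_CONTIG;

	if (flags & XFS_BMAPI_CONTIG)
		printf("caller requires a single extent\n");
	if (!(flags & XFS_BMAPI_ATTRFORK))
		printf("operating on the data fork\n");
	return 0;
}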
@ -100,9 +83,7 @@ typedef struct xfs_bmap_free
{ XFS_BMAPI_DELAY, "DELAY" }, \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_METADATA, "METADATA" }, \
{ XFS_BMAPI_EXACT, "EXACT" }, \
{ XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
{ XFS_BMAPI_ASYNC, "ASYNC" }, \
{ XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
{ XFS_BMAPI_IGSTATE, "IGSTATE" }, \
@ -310,9 +291,7 @@ xfs_bmapi(
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
xfs_extdelta_t *delta); /* o: change made to incore
extents */
xfs_bmap_free_t *flist); /* i/o: list extents to free */
/*
* Map file blocks to filesystem blocks, simple version.
@ -346,8 +325,6 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
xfs_extdelta_t *delta, /* o: change made to incore
extents */
int *done); /* set if not done yet */
/*

View File

@ -24,21 +24,16 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_error.h"

View File

@ -24,20 +24,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"

View File

@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
@ -34,6 +33,12 @@
kmem_zone_t *xfs_buf_item_zone;
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_buf_log_item, bli_item);
}
#ifdef XFS_TRANS_DEBUG
/*
* This function uses an alternate strategy for tracking the bytes
@ -151,12 +156,13 @@ STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
*/
STATIC uint
xfs_buf_item_size(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip)
{
uint nvecs;
int next_bit;
int last_bit;
xfs_buf_t *bp;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
uint nvecs;
int next_bit;
int last_bit;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
if (bip->bli_flags & XFS_BLI_STALE) {
@ -170,7 +176,6 @@ xfs_buf_item_size(
return 1;
}
bp = bip->bli_buf;
ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
nvecs = 1;
last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
@ -219,13 +224,13 @@ xfs_buf_item_size(
*/
STATIC void
xfs_buf_item_format(
xfs_buf_log_item_t *bip,
xfs_log_iovec_t *log_vector)
struct xfs_log_item *lip,
struct xfs_log_iovec *vecp)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
uint base_size;
uint nvecs;
xfs_log_iovec_t *vecp;
xfs_buf_t *bp;
int first_bit;
int last_bit;
int next_bit;
@ -235,8 +240,6 @@ xfs_buf_item_format(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
bp = bip->bli_buf;
vecp = log_vector;
/*
* The size of the base structure is the size of the
@ -248,7 +251,7 @@ xfs_buf_item_format(
base_size =
(uint)(sizeof(xfs_buf_log_format_t) +
((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
vecp->i_addr = &bip->bli_format;
vecp->i_len = base_size;
vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
@ -263,7 +266,7 @@ xfs_buf_item_format(
*/
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
xfs_log_item_in_current_chkpt(&bip->bli_item)))
xfs_log_item_in_current_chkpt(lip)))
bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
bip->bli_flags &= ~XFS_BLI_INODE_BUF;
}
@ -356,66 +359,90 @@ xfs_buf_item_format(
/*
* This is called to pin the buffer associated with the buf log item in memory
* so it cannot be written out. Simply call bpin() on the buffer to do this.
* so it cannot be written out.
*
* We also always take a reference to the buffer log item here so that the bli
* is held while the item is pinned in memory. This means that we can
* unconditionally drop the reference count a transaction holds when the
* transaction is completed.
*/
STATIC void
xfs_buf_item_pin(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip)
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
bp = bip->bli_buf;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_ISBUSY(bip->bli_buf));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
atomic_inc(&bip->bli_refcount);
trace_xfs_buf_item_pin(bip);
xfs_bpin(bp);
}
trace_xfs_buf_item_pin(bip);
atomic_inc(&bip->bli_refcount);
atomic_inc(&bip->bli_buf->b_pin_count);
}
/*
* This is called to unpin the buffer associated with the buf log
* item which was previously pinned with a call to xfs_buf_item_pin().
* Just call bunpin() on the buffer to do this.
*
* Also drop the reference to the buf item for the current transaction.
* If the XFS_BLI_STALE flag is set and we are the last reference,
* then free up the buf log item and unlock the buffer.
*
* If the remove flag is set we are called from uncommit in the
* forced-shutdown path. If that is true and the reference count on
* the log item is going to drop to zero we need to free the item's
* descriptor in the transaction.
*/
STATIC void
xfs_buf_item_unpin(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip,
int remove)
{
struct xfs_ail *ailp;
xfs_buf_t *bp;
int freed;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
xfs_buf_t *bp = bip->bli_buf;
struct xfs_ail *ailp = lip->li_ailp;
int stale = bip->bli_flags & XFS_BLI_STALE;
int freed;
bp = bip->bli_buf;
ASSERT(bp != NULL);
ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_buf_item_unpin(bip);
freed = atomic_dec_and_test(&bip->bli_refcount);
ailp = bip->bli_item.li_ailp;
xfs_bunpin(bp);
if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&bp->b_waiters);
if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
trace_xfs_buf_item_unpin_stale(bip);
if (remove) {
/*
* We have to remove the log item from the transaction
* as we are about to release our reference to the
* buffer. If we don't, the unlock that occurs later
* in xfs_trans_uncommit() will try to reference the
* buffer which we no longer have a hold on.
*/
xfs_trans_del_item(lip);
/*
* Since the transaction no longer refers to the buffer,
* the buffer should no longer refer to the transaction.
*/
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
}
/*
* If we get called here because of an IO error, we may
* or may not have the item on the AIL. xfs_trans_ail_delete()
@ -436,48 +463,6 @@ xfs_buf_item_unpin(
}
}
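Pin and unpin above now manipulate b_pin_count directly instead of calling xfs_bpin()/xfs_bunpin(): pin is an atomic increment, and the unpin that drops the count to zero wakes the buffer's waiters. A hedged userspace analog of that counting discipline (printf stands in for the kernel's wake_up_all()):

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct xfs_buf's pin accounting. */
struct buf {
	atomic_int pin_count;
};

static void buf_pin(struct buf *bp)
{
	atomic_fetch_add(&bp->pin_count, 1);
}

static void buf_unpin(struct buf *bp)
{
	/* atomic_fetch_sub returns the prior value; 1 means we were last */
	if (atomic_fetch_sub(&bp->pin_count, 1) == 1)
		printf("last unpin: wake waiters\n");
}

int main(void)
{
	struct buf b;
	atomic_init(&b.pin_count, 0);

	buf_pin(&b);
	buf_pin(&b);
	buf_unpin(&b);
	buf_unpin(&b);	/* prints the wake message */
	return 0;
}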
/*
* this is called from uncommit in the forced-shutdown path.
* we need to check to see if the reference count on the log item
* is going to drop to zero. If so, unpin will free the log item
* so we need to free the item's descriptor (that points to the item)
* in the transaction.
*/
STATIC void
xfs_buf_item_unpin_remove(
xfs_buf_log_item_t *bip,
xfs_trans_t *tp)
{
/* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
if ((atomic_read(&bip->bli_refcount) == 1) &&
(bip->bli_flags & XFS_BLI_STALE)) {
/*
* yes -- We can safely do some work here and then call
* buf_item_unpin to do the rest because we are
* holding the buffer locked so no one else will be
* able to bump up the refcount. We have to remove the
* log item from the transaction as we are about to release
* our reference to the buffer. If we don't, the unlock that
* occurs later in the xfs_trans_uncommit() will try to
* reference the buffer which we no longer have a hold on.
*/
struct xfs_log_item_desc *lidp;
ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
trace_xfs_buf_item_unpin_stale(bip);
lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
xfs_trans_free_item(tp, lidp);
/*
* Since the transaction no longer refers to the buffer, the
* buffer should no longer refer to the transaction.
*/
XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
}
xfs_buf_item_unpin(bip);
}
/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
@ -488,11 +473,11 @@ xfs_buf_item_unpin_remove(
*/
STATIC uint
xfs_buf_item_trylock(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip)
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
bp = bip->bli_buf;
if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
if (!XFS_BUF_CPSEMA(bp))
@ -529,13 +514,12 @@ xfs_buf_item_trylock(
*/
STATIC void
xfs_buf_item_unlock(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip)
{
int aborted;
xfs_buf_t *bp;
uint hold;
bp = bip->bli_buf;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
int aborted;
uint hold;
/* Clear the buffer's association with this transaction. */
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
@ -546,7 +530,7 @@ xfs_buf_item_unlock(
* (cancelled) buffers at unpin time, but we'll never go through the
* pin/unpin cycle if we abort inside commit.
*/
aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;
aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
/*
* Before possibly freeing the buf item, determine if we should
@ -607,16 +591,16 @@ xfs_buf_item_unlock(
*/
STATIC xfs_lsn_t
xfs_buf_item_committed(
xfs_buf_log_item_t *bip,
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
trace_xfs_buf_item_committed(bip);
if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
(bip->bli_item.li_lsn != 0)) {
return bip->bli_item.li_lsn;
}
return (lsn);
if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
return lip->li_lsn;
return lsn;
}
/*
@ -626,15 +610,16 @@ xfs_buf_item_committed(
*/
STATIC void
xfs_buf_item_push(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip)
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
trace_xfs_buf_item_push(bip);
bp = bip->bli_buf;
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_relse(bp);
}
@ -646,22 +631,24 @@ xfs_buf_item_push(
*/
STATIC void
xfs_buf_item_pushbuf(
xfs_buf_log_item_t *bip)
struct xfs_log_item *lip)
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(XFS_BUF_ISDELAYWRITE(bp));
trace_xfs_buf_item_pushbuf(bip);
bp = bip->bli_buf;
ASSERT(XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
}
/* ARGSUSED */
STATIC void
xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
xfs_buf_item_committing(
struct xfs_log_item *lip,
xfs_lsn_t commit_lsn)
{
}
@ -669,21 +656,16 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
* This is the ops vector shared by all buf log items.
*/
static struct xfs_item_ops xfs_buf_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_buf_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_buf_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
.iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committing
.iop_size = xfs_buf_item_size,
.iop_format = xfs_buf_item_format,
.iop_pin = xfs_buf_item_pin,
.iop_unpin = xfs_buf_item_unpin,
.iop_trylock = xfs_buf_item_trylock,
.iop_unlock = xfs_buf_item_unlock,
.iop_committed = xfs_buf_item_committed,
.iop_push = xfs_buf_item_push,
.iop_pushbuf = xfs_buf_item_pushbuf,
.iop_committing = xfs_buf_item_committing
};
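This de-casted ops vector is the payoff of the series' signature unification: once every callback takes a struct xfs_log_item *, the initializers are plain function pointers that the compiler can type-check. A self-contained sketch of the same pattern, with hypothetical names:

#include <stdio.h>

struct log_item;	/* generic item, like struct xfs_log_item */

struct item_ops {
	unsigned int (*iop_size)(struct log_item *);
	void         (*iop_pin)(struct log_item *);
};

struct log_item {
	const struct item_ops *ops;
};

static unsigned int buf_item_size(struct log_item *lip)
{
	(void)lip;
	return 1;
}

static void buf_item_pin(struct log_item *lip)
{
	(void)lip;
	printf("pinned\n");
}

/* No function-pointer casts needed: the signatures already match. */
static const struct item_ops buf_item_ops = {
	.iop_size = buf_item_size,
	.iop_pin  = buf_item_pin,
};

int main(void)
{
	struct log_item li = { &buf_item_ops };

	printf("size=%u\n", li.ops->iop_size(&li));
	li.ops->iop_pin(&li);
	return 0;
}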
@ -712,7 +694,6 @@ xfs_buf_item_init(
*/
if (bp->b_mount != mp)
bp->b_mount = mp;
XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
if (lip->li_type == XFS_LI_BUF) {
@ -1098,15 +1079,14 @@ xfs_buf_error_relse(
* It is called by xfs_buf_iodone_callbacks() above which will take
* care of cleaning up the buffer itself.
*/
/* ARGSUSED */
void
xfs_buf_iodone(
xfs_buf_t *bp,
xfs_buf_log_item_t *bip)
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
struct xfs_ail *ailp = bip->bli_item.li_ailp;
struct xfs_ail *ailp = lip->li_ailp;
ASSERT(bip->bli_buf == bp);
ASSERT(BUF_ITEM(lip)->bli_buf == bp);
xfs_buf_rele(bp);
@ -1120,6 +1100,6 @@ xfs_buf_iodone(
* Either way, AIL is useless if we're forcing a shutdown.
*/
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
xfs_buf_item_free(bip);
xfs_trans_ail_delete(ailp, lip);
xfs_buf_item_free(BUF_ITEM(lip));
}

View File

@ -124,7 +124,7 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
void(*)(struct xfs_buf *, xfs_log_item_t *),
xfs_log_item_t *);
void xfs_buf_iodone_callbacks(struct xfs_buf *);
void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *);
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
#ifdef XFS_TRANS_DEBUG
void

View File

@ -25,19 +25,14 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
@ -581,16 +576,14 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
xfs_da_intnode_t *node;
xfs_da_node_entry_t *btree;
int tmp;
xfs_mount_t *mp;
node = oldblk->bp->data;
mp = state->mp;
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
ASSERT(newblk->blkno != 0);
if (state->args->whichfork == XFS_DATA_FORK)
ASSERT(newblk->blkno >= mp->m_dirleafblk &&
newblk->blkno < mp->m_dirfreeblk);
ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
newblk->blkno < state->mp->m_dirfreeblk);
/*
* We may need to make some room before we insert the new node.
@ -1601,7 +1594,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
args->flist, NULL))) {
args->flist))) {
return error;
}
ASSERT(nmap <= 1);
@ -1622,8 +1615,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
XFS_BMAPI_METADATA,
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist,
NULL))) {
&mapp[mapi], &nmap, args->flist))) {
kmem_free(mapp);
return error;
}
@ -1884,7 +1876,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
*/
if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
0, args->firstblock, args->flist, NULL,
0, args->firstblock, args->flist,
&done)) == ENOSPC) {
if (w != XFS_DATA_FORK)
break;
@ -1989,7 +1981,7 @@ xfs_da_do_buf(
nfsb,
XFS_BMAPI_METADATA |
xfs_bmapi_aflag(whichfork),
NULL, 0, mapp, &nmap, NULL, NULL)))
NULL, 0, mapp, &nmap, NULL)))
goto exit0;
}
} else {

View File

@ -24,24 +24,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_dfrag.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
@ -425,11 +416,8 @@ xfs_swap_extents(
}
IHOLD(ip);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
IHOLD(tip);
xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ijoin_ref(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_log_inode(tp, ip, ilf_fields);
xfs_trans_log_inode(tp, tip, tilf_fields);

View File

@ -25,13 +25,11 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@ -382,7 +380,7 @@ xfs_readdir(
int rval; /* return value */
int v; /* type-checking value */
xfs_itrace_entry(dp);
trace_xfs_readdir(dp);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
@ -549,7 +547,7 @@ xfs_dir2_grow_inode(
if ((error = xfs_bmapi(tp, dp, bno, count,
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
args->flist, NULL)))
args->flist)))
return error;
ASSERT(nmap <= 1);
if (nmap == 1) {
@ -581,8 +579,7 @@ xfs_dir2_grow_inode(
if ((error = xfs_bmapi(tp, dp, b, c,
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist,
NULL))) {
&mapp[mapi], &nmap, args->flist))) {
kmem_free(mapp);
return error;
}
@ -715,7 +712,7 @@ xfs_dir2_shrink_inode(
*/
if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
NULL, &done))) {
&done))) {
/*
* ENOSPC actually can happen if we're in a removename with
* no space reservation, and the resulting block removal

View File

@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@ -1073,10 +1071,10 @@ xfs_dir2_sf_to_block(
*/
buf_len = dp->i_df.if_bytes;
buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
buf = kmem_alloc(buf_len, KM_SLEEP);
memcpy(buf, sfp, dp->i_df.if_bytes);
xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
memcpy(buf, sfp, buf_len);
xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
dp->i_d.di_size = 0;
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
/*

View File

@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_dir2_data.h"

View File

@ -25,11 +25,9 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@ -875,7 +873,7 @@ xfs_dir2_leaf_getdents(
xfs_dir2_byte_to_da(mp,
XFS_DIR2_LEAF_OFFSET) - map_off,
XFS_BMAPI_METADATA, NULL, 0,
&map[map_valid], &nmap, NULL, NULL);
&map[map_valid], &nmap, NULL);
/*
* Don't know if we should ignore this or
* try to return an error.

View File

@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"

View File

@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"

View File

@ -1,170 +0,0 @@
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_DMAPI_H__
#define __XFS_DMAPI_H__
/* Values used to define the on-disk version of dm_attrname_t. All
* on-disk attribute names start with the 8-byte string "SGI_DMI_".
*
* In the on-disk inode, DMAPI attribute names consist of the user-provided
* name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be
* changed.
*/
#define DMATTR_PREFIXLEN 8
#define DMATTR_PREFIXSTRING "SGI_DMI_"
typedef enum {
DM_EVENT_INVALID = -1,
DM_EVENT_CANCEL = 0, /* not supported */
DM_EVENT_MOUNT = 1,
DM_EVENT_PREUNMOUNT = 2,
DM_EVENT_UNMOUNT = 3,
DM_EVENT_DEBUT = 4, /* not supported */
DM_EVENT_CREATE = 5,
DM_EVENT_CLOSE = 6, /* not supported */
DM_EVENT_POSTCREATE = 7,
DM_EVENT_REMOVE = 8,
DM_EVENT_POSTREMOVE = 9,
DM_EVENT_RENAME = 10,
DM_EVENT_POSTRENAME = 11,
DM_EVENT_LINK = 12,
DM_EVENT_POSTLINK = 13,
DM_EVENT_SYMLINK = 14,
DM_EVENT_POSTSYMLINK = 15,
DM_EVENT_READ = 16,
DM_EVENT_WRITE = 17,
DM_EVENT_TRUNCATE = 18,
DM_EVENT_ATTRIBUTE = 19,
DM_EVENT_DESTROY = 20,
DM_EVENT_NOSPACE = 21,
DM_EVENT_USER = 22,
DM_EVENT_MAX = 23
} dm_eventtype_t;
#define HAVE_DM_EVENTTYPE_T
typedef enum {
DM_RIGHT_NULL,
DM_RIGHT_SHARED,
DM_RIGHT_EXCL
} dm_right_t;
#define HAVE_DM_RIGHT_T
/* Defines for determining if an event message should be sent. */
#ifdef HAVE_DMAPI
#define DM_EVENT_ENABLED(ip, event) ( \
unlikely ((ip)->i_mount->m_flags & XFS_MOUNT_DMAPI) && \
( ((ip)->i_d.di_dmevmask & (1 << event)) || \
((ip)->i_mount->m_dmevmask & (1 << event)) ) \
)
#else
#define DM_EVENT_ENABLED(ip, event) (0)
#endif
#define DM_XFS_VALID_FS_EVENTS ( \
(1 << DM_EVENT_PREUNMOUNT) | \
(1 << DM_EVENT_UNMOUNT) | \
(1 << DM_EVENT_NOSPACE) | \
(1 << DM_EVENT_DEBUT) | \
(1 << DM_EVENT_CREATE) | \
(1 << DM_EVENT_POSTCREATE) | \
(1 << DM_EVENT_REMOVE) | \
(1 << DM_EVENT_POSTREMOVE) | \
(1 << DM_EVENT_RENAME) | \
(1 << DM_EVENT_POSTRENAME) | \
(1 << DM_EVENT_LINK) | \
(1 << DM_EVENT_POSTLINK) | \
(1 << DM_EVENT_SYMLINK) | \
(1 << DM_EVENT_POSTSYMLINK) | \
(1 << DM_EVENT_ATTRIBUTE) | \
(1 << DM_EVENT_DESTROY) )
/* Events valid in dm_set_eventlist() when called with a file handle for
a regular file or a symlink. These events are persistent.
*/
#define DM_XFS_VALID_FILE_EVENTS ( \
(1 << DM_EVENT_ATTRIBUTE) | \
(1 << DM_EVENT_DESTROY) )
/* Events valid in dm_set_eventlist() when called with a file handle for
a directory. These events are persistent.
*/
#define DM_XFS_VALID_DIRECTORY_EVENTS ( \
(1 << DM_EVENT_CREATE) | \
(1 << DM_EVENT_POSTCREATE) | \
(1 << DM_EVENT_REMOVE) | \
(1 << DM_EVENT_POSTREMOVE) | \
(1 << DM_EVENT_RENAME) | \
(1 << DM_EVENT_POSTRENAME) | \
(1 << DM_EVENT_LINK) | \
(1 << DM_EVENT_POSTLINK) | \
(1 << DM_EVENT_SYMLINK) | \
(1 << DM_EVENT_POSTSYMLINK) | \
(1 << DM_EVENT_ATTRIBUTE) | \
(1 << DM_EVENT_DESTROY) )
/* Events supported by the XFS filesystem. */
#define DM_XFS_SUPPORTED_EVENTS ( \
(1 << DM_EVENT_MOUNT) | \
(1 << DM_EVENT_PREUNMOUNT) | \
(1 << DM_EVENT_UNMOUNT) | \
(1 << DM_EVENT_NOSPACE) | \
(1 << DM_EVENT_CREATE) | \
(1 << DM_EVENT_POSTCREATE) | \
(1 << DM_EVENT_REMOVE) | \
(1 << DM_EVENT_POSTREMOVE) | \
(1 << DM_EVENT_RENAME) | \
(1 << DM_EVENT_POSTRENAME) | \
(1 << DM_EVENT_LINK) | \
(1 << DM_EVENT_POSTLINK) | \
(1 << DM_EVENT_SYMLINK) | \
(1 << DM_EVENT_POSTSYMLINK) | \
(1 << DM_EVENT_READ) | \
(1 << DM_EVENT_WRITE) | \
(1 << DM_EVENT_TRUNCATE) | \
(1 << DM_EVENT_ATTRIBUTE) | \
(1 << DM_EVENT_DESTROY) )
/*
* Definitions used for the flags field on dm_send_*_event().
*/
#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
#define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */
#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */
/*
* Pull in platform specific event flags defines
*/
#include "xfs_dmapi_priv.h"
/*
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
#define AT_DELAY_FLAG(f) ((f & XFS_ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
#endif /* __XFS_DMAPI_H__ */
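Although the header is deleted outright, the machinery it defined was plain bitmask arithmetic: each event number selects one bit, and DM_EVENT_ENABLED tested that bit against the union of the inode's and the mount's event masks (plus a mount-flag gate omitted here). A standalone illustration:

#include <stdio.h>

enum { DM_EVENT_READ = 16, DM_EVENT_WRITE = 17 };	/* values from above */

int main(void)
{
	unsigned int inode_mask = 0;
	unsigned int mount_mask = 1u << DM_EVENT_WRITE;

	/* Same test DM_EVENT_ENABLED performed, minus the mount-flag check. */
	int read_on  = ((inode_mask | mount_mask) & (1u << DM_EVENT_READ)) != 0;
	int write_on = ((inode_mask | mount_mask) & (1u << DM_EVENT_WRITE)) != 0;

	printf("read=%s write=%s\n", read_on ? "on" : "off",
	       write_on ? "on" : "off");
	return 0;
}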

View File

@ -1,55 +0,0 @@
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_dmapi.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
static struct xfs_dmops xfs_dmcore_stub = {
.xfs_send_data = (xfs_send_data_t)fs_nosys,
.xfs_send_mmap = (xfs_send_mmap_t)fs_noerr,
.xfs_send_destroy = (xfs_send_destroy_t)fs_nosys,
.xfs_send_namesp = (xfs_send_namesp_t)fs_nosys,
.xfs_send_mount = (xfs_send_mount_t)fs_nosys,
.xfs_send_unmount = (xfs_send_unmount_t)fs_noerr,
};
int
xfs_dmops_get(struct xfs_mount *mp)
{
if (mp->m_flags & XFS_MOUNT_DMAPI) {
cmn_err(CE_WARN,
"XFS: dmapi support not available in this kernel.");
return EINVAL;
}
mp->m_dm_ops = &xfs_dmcore_stub;
return 0;
}
void
xfs_dmops_put(struct xfs_mount *mp)
{
}

View File

@ -23,12 +23,8 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_utils.h"

View File

@ -24,7 +24,6 @@
#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
@ -33,18 +32,19 @@
kmem_zone_t *xfs_efi_zone;
kmem_zone_t *xfs_efd_zone;
STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *);
static inline struct xfs_efi_log_item *EFI_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_efi_log_item, efi_item);
}
void
xfs_efi_item_free(xfs_efi_log_item_t *efip)
xfs_efi_item_free(
struct xfs_efi_log_item *efip)
{
int nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
kmem_free(efip);
} else {
else
kmem_zone_free(xfs_efi_zone, efip);
}
}
/*
@ -52,9 +52,9 @@ xfs_efi_item_free(xfs_efi_log_item_t *efip)
* We only need 1 iovec for an efi item. It just logs the efi_log_format
* structure.
*/
/*ARGSUSED*/
STATIC uint
xfs_efi_item_size(xfs_efi_log_item_t *efip)
xfs_efi_item_size(
struct xfs_log_item *lip)
{
return 1;
}
@ -67,10 +67,12 @@ xfs_efi_item_size(xfs_efi_log_item_t *efip)
* slots in the efi item have been filled.
*/
STATIC void
xfs_efi_item_format(xfs_efi_log_item_t *efip,
xfs_log_iovec_t *log_vector)
xfs_efi_item_format(
struct xfs_log_item *lip,
struct xfs_log_iovec *log_vector)
{
uint size;
struct xfs_efi_log_item *efip = EFI_ITEM(lip);
uint size;
ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
@ -80,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t);
efip->efi_format.efi_size = 1;
log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
log_vector->i_addr = &efip->efi_format;
log_vector->i_len = size;
log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
ASSERT(size >= sizeof(xfs_efi_log_format_t));
@ -90,60 +92,33 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
/*
* Pinning has no meaning for an efi item, so just return.
*/
/*ARGSUSED*/
STATIC void
xfs_efi_item_pin(xfs_efi_log_item_t *efip)
xfs_efi_item_pin(
struct xfs_log_item *lip)
{
return;
}
/*
* While EFIs cannot really be pinned, the unpin operation is the
* last place at which the EFI is manipulated during a transaction.
* Here we coordinate with xfs_efi_cancel() to determine who gets to
* free the EFI.
*/
/*ARGSUSED*/
STATIC void
xfs_efi_item_unpin(xfs_efi_log_item_t *efip)
xfs_efi_item_unpin(
struct xfs_log_item *lip,
int remove)
{
struct xfs_ail *ailp = efip->efi_item.li_ailp;
struct xfs_efi_log_item *efip = EFI_ITEM(lip);
struct xfs_ail *ailp = lip->li_ailp;
spin_lock(&ailp->xa_lock);
if (efip->efi_flags & XFS_EFI_CANCELED) {
/* xfs_trans_ail_delete() drops the AIL lock. */
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
spin_unlock(&ailp->xa_lock);
}
}
/*
* like unpin only we have to also clear the xaction descriptor
* pointing at the log item if we free the item. This routine duplicates
* unpin because efi_flags is protected by the AIL lock. Freeing
* the descriptor and then calling unpin would force us to drop the AIL
* lock which would open up a race condition.
*/
STATIC void
xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
{
struct xfs_ail *ailp = efip->efi_item.li_ailp;
xfs_log_item_desc_t *lidp;
spin_lock(&ailp->xa_lock);
if (efip->efi_flags & XFS_EFI_CANCELED) {
/*
* free the xaction descriptor pointing to this item
*/
lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip);
xfs_trans_free_item(tp, lidp);
if (remove)
xfs_trans_del_item(lip);
/* xfs_trans_ail_delete() drops the AIL lock. */
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
xfs_trans_ail_delete(ailp, lip);
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
@ -158,9 +133,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
* XFS_ITEM_PINNED so that the caller will eventually flush the log.
* This should help in getting the EFI out of the AIL.
*/
/*ARGSUSED*/
STATIC uint
xfs_efi_item_trylock(xfs_efi_log_item_t *efip)
xfs_efi_item_trylock(
struct xfs_log_item *lip)
{
return XFS_ITEM_PINNED;
}
@ -168,13 +143,12 @@ xfs_efi_item_trylock(xfs_efi_log_item_t *efip)
/*
* Efi items have no locking, so just return.
*/
/*ARGSUSED*/
STATIC void
xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
xfs_efi_item_unlock(
struct xfs_log_item *lip)
{
if (efip->efi_item.li_flags & XFS_LI_ABORTED)
xfs_efi_item_free(efip);
return;
if (lip->li_flags & XFS_LI_ABORTED)
xfs_efi_item_free(EFI_ITEM(lip));
}
/*
@ -183,9 +157,10 @@ xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
* flag is not paid any attention here. Checking for that is delayed
* until the EFI is unpinned.
*/
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
xfs_efi_item_committed(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
return lsn;
}
@ -195,11 +170,10 @@ xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
* stuck waiting for all of its corresponding efd items to be
* committed to disk.
*/
/*ARGSUSED*/
STATIC void
xfs_efi_item_push(xfs_efi_log_item_t *efip)
xfs_efi_item_push(
struct xfs_log_item *lip)
{
return;
}
/*
@ -209,61 +183,55 @@ xfs_efi_item_push(xfs_efi_log_item_t *efip)
* example, for inodes, the inode is locked throughout the extent freeing
* so the dependency should be recorded there.
*/
/*ARGSUSED*/
STATIC void
xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
xfs_efi_item_committing(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
return;
}
/*
* This is the ops vector shared by all efi log items.
*/
static struct xfs_item_ops xfs_efi_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efi_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_efi_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_efi_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_efi_item_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_efi_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push,
.iop_pushbuf = NULL,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_efi_item_committing
.iop_size = xfs_efi_item_size,
.iop_format = xfs_efi_item_format,
.iop_pin = xfs_efi_item_pin,
.iop_unpin = xfs_efi_item_unpin,
.iop_trylock = xfs_efi_item_trylock,
.iop_unlock = xfs_efi_item_unlock,
.iop_committed = xfs_efi_item_committed,
.iop_push = xfs_efi_item_push,
.iop_committing = xfs_efi_item_committing
};
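
The old ops table cast every handler to the generic xfs_log_item_t-based signature; calling a function through a pointer of an incompatible type is undefined behaviour in C, and the casts also hid signature drift from the compiler. With the handlers rewritten to take struct xfs_log_item * directly, the initializers become plain assignments. A small sketch of the before/after shape, with hypothetical types:

#include <stdio.h>

struct item { int id; };

static unsigned item_size(struct item *it)
{
	(void)it;
	return 1;
}

struct ops {
	unsigned (*size)(struct item *);
};

/* old style (avoid): cast through an incompatible pointer type, e.g.
 *	.size = (unsigned (*)(struct item *))some_other_fn
 * new style: signatures match, no cast, the compiler checks it */
static struct ops good = { .size = item_size };

int main(void)
{
	struct item it = { .id = 42 };
	printf("size = %u\n", good.size(&it));
	return 0;
}
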
/*
* Allocate and initialize an efi item with the given number of extents.
*/
xfs_efi_log_item_t *
xfs_efi_init(xfs_mount_t *mp,
uint nextents)
struct xfs_efi_log_item *
xfs_efi_init(
struct xfs_mount *mp,
uint nextents)
{
xfs_efi_log_item_t *efip;
struct xfs_efi_log_item *efip;
uint size;
ASSERT(nextents > 0);
if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
size = (uint)(sizeof(xfs_efi_log_item_t) +
((nextents - 1) * sizeof(xfs_extent_t)));
efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP);
efip = kmem_zalloc(size, KM_SLEEP);
} else {
efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone,
KM_SLEEP);
efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP);
}
xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
efip->efi_format.efi_nextents = nextents;
efip->efi_format.efi_id = (__psint_t)(void*)efip;
return (efip);
return efip;
}
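
xfs_efi_init() sizes large allocations as sizeof(the item) plus (nextents - 1) extra extent slots, because the format structure at the item's tail already declares a one-element extent array; small counts come from a preallocated zone instead. A userspace sketch of that sizing rule, with hypothetical struct names:

#include <stdio.h>
#include <stdlib.h>

struct extent { long start, len; };

struct efi_format {
	unsigned nextents;
	struct extent extents[1];	/* one tail slot declared */
};

static struct efi_format *efi_alloc(unsigned nextents)
{
	/* one slot is counted by sizeof(), so add the other n - 1 */
	size_t size = sizeof(struct efi_format) +
		      (nextents - 1) * sizeof(struct extent);
	struct efi_format *fmt = calloc(1, size);

	if (fmt)
		fmt->nextents = nextents;
	return fmt;
}

int main(void)
{
	struct efi_format *fmt = efi_alloc(16);

	printf("allocated %u extent slots\n", fmt ? fmt->nextents : 0);
	free(fmt);
	return 0;
}
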
/*
@ -276,7 +244,7 @@ xfs_efi_init(xfs_mount_t *mp,
int
xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
{
xfs_efi_log_format_t *src_efi_fmt = (xfs_efi_log_format_t *)buf->i_addr;
xfs_efi_log_format_t *src_efi_fmt = buf->i_addr;
uint i;
uint len = sizeof(xfs_efi_log_format_t) +
(src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t);
@ -289,8 +257,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len);
return 0;
} else if (buf->i_len == len32) {
xfs_efi_log_format_32_t *src_efi_fmt_32 =
(xfs_efi_log_format_32_t *)buf->i_addr;
xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->i_addr;
dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type;
dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size;
@ -304,8 +271,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
}
return 0;
} else if (buf->i_len == len64) {
xfs_efi_log_format_64_t *src_efi_fmt_64 =
(xfs_efi_log_format_64_t *)buf->i_addr;
xfs_efi_log_format_64_t *src_efi_fmt_64 = buf->i_addr;
dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type;
dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size;
@ -356,16 +322,18 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
}
}
STATIC void
xfs_efd_item_free(xfs_efd_log_item_t *efdp)
static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
{
int nexts = efdp->efd_format.efd_nextents;
return container_of(lip, struct xfs_efd_log_item, efd_item);
}
if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
STATIC void
xfs_efd_item_free(struct xfs_efd_log_item *efdp)
{
if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
kmem_free(efdp);
} else {
else
kmem_zone_free(xfs_efd_zone, efdp);
}
}
/*
@ -373,9 +341,9 @@ xfs_efd_item_free(xfs_efd_log_item_t *efdp)
* We only need 1 iovec for an efd item. It just logs the efd_log_format
* structure.
*/
/*ARGSUSED*/
STATIC uint
xfs_efd_item_size(xfs_efd_log_item_t *efdp)
xfs_efd_item_size(
struct xfs_log_item *lip)
{
return 1;
}
@ -388,10 +356,12 @@ xfs_efd_item_size(xfs_efd_log_item_t *efdp)
* slots in the efd item have been filled.
*/
STATIC void
xfs_efd_item_format(xfs_efd_log_item_t *efdp,
xfs_log_iovec_t *log_vector)
xfs_efd_item_format(
struct xfs_log_item *lip,
struct xfs_log_iovec *log_vector)
{
uint size;
struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
uint size;
ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents);
@ -401,48 +371,38 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t);
efdp->efd_format.efd_size = 1;
log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
log_vector->i_addr = &efdp->efd_format;
log_vector->i_len = size;
log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
ASSERT(size >= sizeof(xfs_efd_log_format_t));
}
/*
* Pinning has no meaning for an efd item, so just return.
*/
/*ARGSUSED*/
STATIC void
xfs_efd_item_pin(xfs_efd_log_item_t *efdp)
xfs_efd_item_pin(
struct xfs_log_item *lip)
{
return;
}
/*
* Since pinning has no meaning for an efd item, unpinning does
* not either.
*/
/*ARGSUSED*/
STATIC void
xfs_efd_item_unpin(xfs_efd_log_item_t *efdp)
xfs_efd_item_unpin(
struct xfs_log_item *lip,
int remove)
{
return;
}
/*ARGSUSED*/
STATIC void
xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp)
{
return;
}
/*
* Efd items have no locking, so just return success.
*/
/*ARGSUSED*/
STATIC uint
xfs_efd_item_trylock(xfs_efd_log_item_t *efdp)
xfs_efd_item_trylock(
struct xfs_log_item *lip)
{
return XFS_ITEM_LOCKED;
}
@ -451,13 +411,12 @@ xfs_efd_item_trylock(xfs_efd_log_item_t *efdp)
* Efd items have no locking or pushing, so return failure
* so that the caller doesn't bother with us.
*/
/*ARGSUSED*/
STATIC void
xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
xfs_efd_item_unlock(
struct xfs_log_item *lip)
{
if (efdp->efd_item.li_flags & XFS_LI_ABORTED)
xfs_efd_item_free(efdp);
return;
if (lip->li_flags & XFS_LI_ABORTED)
xfs_efd_item_free(EFD_ITEM(lip));
}
/*
@ -467,15 +426,18 @@ xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
* return -1 to keep the transaction code from further referencing
* this item.
*/
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
xfs_efd_item_committed(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
/*
* If we got a log I/O error, it's always the case that the LR with the
* EFI got unpinned and freed before the EFD got aborted.
*/
if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
if (!(lip->li_flags & XFS_LI_ABORTED))
xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents);
xfs_efd_item_free(efdp);
@ -486,11 +448,10 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
* There isn't much you can do to push on an efd item. It is simply
* stuck waiting for the log to be flushed to disk.
*/
/*ARGSUSED*/
STATIC void
xfs_efd_item_push(xfs_efd_log_item_t *efdp)
xfs_efd_item_push(
struct xfs_log_item *lip)
{
return;
}
/*
@ -500,55 +461,48 @@ xfs_efd_item_push(xfs_efd_log_item_t *efdp)
* example, for inodes, the inode is locked throughout the extent freeing
* so the dependency should be recorded there.
*/
/*ARGSUSED*/
STATIC void
xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn)
xfs_efd_item_committing(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
return;
}
/*
* This is the ops vector shared by all efd log items.
*/
static struct xfs_item_ops xfs_efd_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efd_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_efd_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_efd_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_efd_item_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_efd_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push,
.iop_pushbuf = NULL,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_efd_item_committing
.iop_size = xfs_efd_item_size,
.iop_format = xfs_efd_item_format,
.iop_pin = xfs_efd_item_pin,
.iop_unpin = xfs_efd_item_unpin,
.iop_trylock = xfs_efd_item_trylock,
.iop_unlock = xfs_efd_item_unlock,
.iop_committed = xfs_efd_item_committed,
.iop_push = xfs_efd_item_push,
.iop_committing = xfs_efd_item_committing
};
/*
* Allocate and initialize an efd item with the given number of extents.
*/
xfs_efd_log_item_t *
xfs_efd_init(xfs_mount_t *mp,
xfs_efi_log_item_t *efip,
uint nextents)
struct xfs_efd_log_item *
xfs_efd_init(
struct xfs_mount *mp,
struct xfs_efi_log_item *efip,
uint nextents)
{
xfs_efd_log_item_t *efdp;
struct xfs_efd_log_item *efdp;
uint size;
ASSERT(nextents > 0);
if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
size = (uint)(sizeof(xfs_efd_log_item_t) +
((nextents - 1) * sizeof(xfs_extent_t)));
efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP);
efdp = kmem_zalloc(size, KM_SLEEP);
} else {
efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone,
KM_SLEEP);
efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP);
}
xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops);
@ -556,5 +510,5 @@ xfs_efd_init(xfs_mount_t *mp,
efdp->efd_format.efd_nextents = nextents;
efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
return (efdp);
return efdp;
}

View File

@ -18,13 +18,9 @@
#include "xfs.h"
#include "xfs_bmap_btree.h"
#include "xfs_inum.h"
#include "xfs_dir2.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
@ -127,6 +123,82 @@ typedef struct fstrm_item
xfs_inode_t *pip; /* Parent directory inode pointer. */
} fstrm_item_t;
/*
* Allocation group filestream associations are tracked with per-ag atomic
* counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
* particular AG already has active filestreams associated with it. The mount
* point's m_peraglock is used to protect these counters from per-ag array
* re-allocation during a growfs operation. When xfs_growfs_data_private() is
* about to reallocate the array, it calls xfs_filestream_flush() with the
* m_peraglock held in write mode.
*
* Since xfs_mru_cache_flush() guarantees that all the free functions for all
* the cache elements have finished executing before it returns, it's safe for
* the free functions to use the atomic counters without m_peraglock protection.
* This allows the implementation of xfs_fstrm_free_func() to be agnostic about
* whether it was called with the m_peraglock held in read mode, write mode or
* not held at all. The race condition this addresses is the following:
*
* - The work queue scheduler fires and pulls a filestream directory cache
* element off the LRU end of the cache for deletion, then gets pre-empted.
* - A growfs operation grabs the m_peraglock in write mode, flushes all the
* remaining items from the cache and reallocates the mount point's per-ag
* array, resetting all the counters to zero.
* - The work queue thread resumes and calls the free function for the element
* it started cleaning up earlier. In the process it decrements the
* filestreams counter for an AG that now has no references.
*
* With a shrinkfs feature, the above scenario could panic the system.
*
* All other uses of the following macros should be protected by either the
* m_peraglock held in read mode, or the cache's internal locking exposed by the
* interval between a call to xfs_mru_cache_lookup() and a call to
* xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
* when new elements are added to the cache.
*
* Combined, these locking rules ensure that no associations will ever exist in
* the cache that reference per-ag array elements that have since been
* reallocated.
*/
static int
xfs_filestream_peek_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
int ret;
pag = xfs_perag_get(mp, agno);
ret = atomic_read(&pag->pagf_fstrms);
xfs_perag_put(pag);
return ret;
}
static int
xfs_filestream_get_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
int ret;
pag = xfs_perag_get(mp, agno);
ret = atomic_inc_return(&pag->pagf_fstrms);
xfs_perag_put(pag);
return ret;
}
static void
xfs_filestream_put_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
pag = xfs_perag_get(mp, agno);
atomic_dec(&pag->pagf_fstrms);
xfs_perag_put(pag);
}
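
Each helper pairs a reference-counted xfs_perag_get()/xfs_perag_put() lookup with a single atomic operation on the pagf_fstrms counter, so the count can be read or bumped without holding m_peraglock. A compilable C11 sketch of the same get/operate/put shape, with stdatomic standing in for the kernel's atomic_t and hypothetical names throughout:

#include <stdatomic.h>
#include <stdio.h>

struct perag { atomic_int fstrms; };

static struct perag ag_table[4];	/* toy per-AG array */

/* in the kernel these take and drop a reference on the perag */
static struct perag *perag_get(unsigned agno) { return &ag_table[agno]; }
static void perag_put(struct perag *pag) { (void)pag; }

static int filestream_get_ag(unsigned agno)
{
	struct perag *pag = perag_get(agno);
	/* fetch_add returns the old value; +1 mimics atomic_inc_return() */
	int ret = atomic_fetch_add(&pag->fstrms, 1) + 1;

	perag_put(pag);
	return ret;
}

static int filestream_peek_ag(unsigned agno)
{
	struct perag *pag = perag_get(agno);
	int ret = atomic_load(&pag->fstrms);

	perag_put(pag);
	return ret;
}

int main(void)
{
	filestream_get_ag(2);
	filestream_get_ag(2);
	printf("AG 2 has %d active filestreams\n", filestream_peek_ag(2));
	return 0;
}
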
/*
* Scan the AGs starting at startag looking for an AG that isn't in use and has
@ -355,16 +427,14 @@ xfs_fstrm_free_func(
{
fstrm_item_t *item = (fstrm_item_t *)data;
xfs_inode_t *ip = item->ip;
int ref;
ASSERT(ip->i_ino == ino);
xfs_iflags_clear(ip, XFS_IFILESTREAM);
/* Drop the reference taken on the AG when the item was added. */
ref = xfs_filestream_put_ag(ip->i_mount, item->ag);
xfs_filestream_put_ag(ip->i_mount, item->ag);
ASSERT(ref >= 0);
TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
xfs_filestream_peek_ag(ip->i_mount, item->ag));

View File

@ -42,88 +42,6 @@ extern ktrace_t *xfs_filestreams_trace_buf;
#endif
/*
* Allocation group filestream associations are tracked with per-ag atomic
* counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
* particular AG already has active filestreams associated with it. The mount
* point's m_peraglock is used to protect these counters from per-ag array
* re-allocation during a growfs operation. When xfs_growfs_data_private() is
* about to reallocate the array, it calls xfs_filestream_flush() with the
* m_peraglock held in write mode.
*
* Since xfs_mru_cache_flush() guarantees that all the free functions for all
* the cache elements have finished executing before it returns, it's safe for
* the free functions to use the atomic counters without m_peraglock protection.
* This allows the implementation of xfs_fstrm_free_func() to be agnostic about
* whether it was called with the m_peraglock held in read mode, write mode or
* not held at all. The race condition this addresses is the following:
*
* - The work queue scheduler fires and pulls a filestream directory cache
* element off the LRU end of the cache for deletion, then gets pre-empted.
* - A growfs operation grabs the m_peraglock in write mode, flushes all the
* remaining items from the cache and reallocates the mount point's per-ag
* array, resetting all the counters to zero.
* - The work queue thread resumes and calls the free function for the element
* it started cleaning up earlier. In the process it decrements the
* filestreams counter for an AG that now has no references.
*
* With a shrinkfs feature, the above scenario could panic the system.
*
* All other uses of the following macros should be protected by either the
* m_peraglock held in read mode, or the cache's internal locking exposed by the
* interval between a call to xfs_mru_cache_lookup() and a call to
* xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
* when new elements are added to the cache.
*
* Combined, these locking rules ensure that no associations will ever exist in
* the cache that reference per-ag array elements that have since been
* reallocated.
*/
/*
* xfs_filestream_peek_ag is only used in tracing code
*/
static inline int
xfs_filestream_peek_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
int ret;
pag = xfs_perag_get(mp, agno);
ret = atomic_read(&pag->pagf_fstrms);
xfs_perag_put(pag);
return ret;
}
static inline int
xfs_filestream_get_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
int ret;
pag = xfs_perag_get(mp, agno);
ret = atomic_inc_return(&pag->pagf_fstrms);
xfs_perag_put(pag);
return ret;
}
static inline int
xfs_filestream_put_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
int ret;
pag = xfs_perag_get(mp, agno);
ret = atomic_dec_return(&pag->pagf_fstrms);
xfs_perag_put(pag);
return ret;
}
/* allocation selection flags */
typedef enum xfs_fstrm_alloc {
XFS_PICK_USERDATA = 1,

View File

@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@ -626,8 +622,7 @@ xfs_fs_log_dummy(
ip = mp->m_rootip;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);

View File

@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"

View File

@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"

View File

@ -25,14 +25,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@ -95,7 +91,7 @@ xfs_inode_alloc(
return ip;
}
STATIC void
void
xfs_inode_free(
struct xfs_inode *ip)
{
@ -212,7 +208,7 @@ xfs_iget_cache_hit(
ip->i_flags &= ~XFS_INEW;
ip->i_flags |= XFS_IRECLAIMABLE;
__xfs_inode_set_reclaim_tag(pag, ip);
trace_xfs_iget_reclaim(ip);
trace_xfs_iget_reclaim_fail(ip);
goto out_error;
}
@ -227,6 +223,7 @@ xfs_iget_cache_hit(
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
trace_xfs_iget_skip(ip);
error = EAGAIN;
goto out_error;
}
@ -234,6 +231,7 @@ xfs_iget_cache_hit(
/* We've got a live one. */
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
trace_xfs_iget_hit(ip);
}
if (lock_flags != 0)
@ -242,7 +240,6 @@ xfs_iget_cache_hit(
xfs_iflags_clear(ip, XFS_ISTALE);
XFS_STATS_INC(xs_ig_found);
trace_xfs_iget_found(ip);
return 0;
out_error:
@ -264,7 +261,6 @@ xfs_iget_cache_miss(
{
struct xfs_inode *ip;
int error;
unsigned long first_index, mask;
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
ip = xfs_inode_alloc(mp, ino);
@ -275,7 +271,7 @@ xfs_iget_cache_miss(
if (error)
goto out_destroy;
xfs_itrace_entry(ip);
trace_xfs_iget_miss(ip);
if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
error = ENOENT;
@ -301,8 +297,6 @@ xfs_iget_cache_miss(
BUG();
}
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
first_index = agino & mask;
write_lock(&pag->pag_ici_lock);
/* insert the new inode */
@ -321,7 +315,6 @@ xfs_iget_cache_miss(
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
trace_xfs_iget_alloc(ip);
*ipp = ip;
return 0;
@ -421,97 +414,6 @@ out_error_or_again:
return error;
}
/*
* Decrement reference count of an inode structure and unlock it.
*
* ip -- the inode being released
* lock_flags -- this parameter indicates the inode's locks to be
* to be released. See the comment on xfs_iunlock() for a list
* of valid values.
*/
void
xfs_iput(xfs_inode_t *ip,
uint lock_flags)
{
xfs_itrace_entry(ip);
xfs_iunlock(ip, lock_flags);
IRELE(ip);
}
/*
* Special iput for brand-new inodes that are still locked
*/
void
xfs_iput_new(
xfs_inode_t *ip,
uint lock_flags)
{
struct inode *inode = VFS_I(ip);
xfs_itrace_entry(ip);
if ((ip->i_d.di_mode == 0)) {
ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
make_bad_inode(inode);
}
if (inode->i_state & I_NEW)
unlock_new_inode(inode);
if (lock_flags)
xfs_iunlock(ip, lock_flags);
IRELE(ip);
}
/*
* This is called to free all the memory associated with an inode.
* It must free the inode itself and any buffers allocated for
* if_extents/if_data and if_broot. It must also free the lock
* associated with the inode.
*
* Note: because we don't initialise everything on reallocation out
* of the zone, we must ensure we nullify everything correctly before
* freeing the structure.
*/
void
xfs_ireclaim(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
XFS_STATS_INC(xs_ig_reclaims);
/*
* Remove the inode from the per-AG radix tree.
*
* Because radix_tree_delete won't complain even if the item was never
* added to the tree assert that it's been there before to catch
* problems with the inode life time early on.
*/
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
write_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root, agino))
ASSERT(0);
write_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
/*
* Here we do an (almost) spurious inode lock in order to coordinate
* with inode cache radix tree lookups. This is because the lookup
* can reference the inodes in the cache without taking references.
*
* We make that OK here by ensuring that we wait until the inode is
* unlocked after the lookup before we go ahead and free it. We get
* both the ilock and the iolock because the code may need to drop the
* ilock one but will still hold the iolock.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_inode_free(ip);
}
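
The "almost spurious" ilock/iolock cycle in xfs_ireclaim() is a synchronization barrier: cache lookups may still be touching this inode without holding a reference, so reclaim takes and drops the same locks to wait out any lookup that raced with the radix-tree removal before freeing the memory. A userspace sketch of that lock-cycle-before-free idiom (pthreads, hypothetical names; it only illustrates the shape, the kernel additionally relies on lookups re-checking state under the lock):

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int data;
};

/* a lookup that inspects the object under its lock, without a refcount */
static int lookup_peek(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	int v = o->data;
	pthread_mutex_unlock(&o->lock);
	return v;
}

/* reclaim: after unlinking o from the index, cycle the lock so any
 * lookup that already found o has finished before we free it */
static void reclaim(struct obj *o)
{
	/* ... remove o from the lookup structure first ... */
	pthread_mutex_lock(&o->lock);
	pthread_mutex_unlock(&o->lock);
	pthread_mutex_destroy(&o->lock);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	o->data = 7;
	(void)lookup_peek(o);
	reclaim(o);
	return 0;
}
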
/*
* This is a wrapper routine around the xfs_ilock() routine
* used to centralize some grungy code. It is used in places

View File

@ -27,13 +27,10 @@
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@ -44,7 +41,6 @@
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
@ -426,7 +422,7 @@ xfs_iformat(
if (!XFS_DFORK_Q(dip))
return 0;
ASSERT(ip->i_afp == NULL);
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
ip->i_afp->if_ext_max =
XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
switch (dip->di_aformat) {
@ -509,7 +505,7 @@ xfs_iformat_local(
ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
else {
real_size = roundup(size, 4);
ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
}
ifp->if_bytes = size;
ifp->if_real_bytes = real_size;
@ -636,7 +632,7 @@ xfs_iformat_btree(
}
ifp->if_broot_bytes = size;
ifp->if_broot = kmem_alloc(size, KM_SLEEP);
ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
ASSERT(ifp->if_broot != NULL);
/*
* Copy and convert from the on-disk structure
@ -922,7 +918,6 @@ xfs_iread_extents(
int error;
xfs_ifork_t *ifp;
xfs_extnum_t nextents;
size_t size;
if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
@ -930,7 +925,6 @@ xfs_iread_extents(
return XFS_ERROR(EFSCORRUPTED);
}
nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
size = nextents * sizeof(xfs_bmbt_rec_t);
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
@ -1226,7 +1220,7 @@ xfs_isize_check(
(xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
map_first),
XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
NULL, NULL))
NULL))
return;
ASSERT(nimaps == 1);
ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
@ -1460,7 +1454,7 @@ xfs_itruncate_finish(
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(ip->i_transp == *tp);
ASSERT(ip->i_itemp != NULL);
ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
ASSERT(ip->i_itemp->ili_lock_flags == 0);
ntp = *tp;
@ -1589,11 +1583,10 @@ xfs_itruncate_finish(
xfs_bmap_init(&free_list, &first_block);
error = xfs_bunmapi(ntp, ip,
first_unmap_block, unmap_len,
xfs_bmapi_aflag(fork) |
(sync ? 0 : XFS_BMAPI_ASYNC),
xfs_bmapi_aflag(fork),
XFS_ITRUNC_MAX_EXTENTS,
&first_block, &free_list,
NULL, &done);
&done);
if (error) {
/*
* If the bunmapi call encounters an error,
@ -1612,12 +1605,8 @@ xfs_itruncate_finish(
*/
error = xfs_bmap_finish(tp, &free_list, &committed);
ntp = *tp;
if (committed) {
/* link the inode into the next xact in the chain */
xfs_trans_ijoin(ntp, ip,
XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ihold(ntp, ip);
}
if (committed)
xfs_trans_ijoin(ntp, ip);
if (error) {
/*
@ -1646,9 +1635,7 @@ xfs_itruncate_finish(
error = xfs_trans_commit(*tp, 0);
*tp = ntp;
/* link the inode into the next transaction in the chain */
xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ihold(ntp, ip);
xfs_trans_ijoin(ntp, ip);
if (error)
return error;
@ -1985,7 +1972,7 @@ xfs_ifree_cluster(
if (lip->li_type == XFS_LI_INODE) {
iip = (xfs_inode_log_item_t *)lip;
ASSERT(iip->ili_logged == 1);
lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
lip->li_cb = xfs_istale_done;
xfs_trans_ail_copy_lsn(mp->m_ail,
&iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
@ -2055,9 +2042,8 @@ xfs_ifree_cluster(
xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
xfs_buf_attach_iodone(bp,
(void(*)(xfs_buf_t*,xfs_log_item_t*))
xfs_istale_done, (xfs_log_item_t *)iip);
xfs_buf_attach_iodone(bp, xfs_istale_done,
&iip->ili_item);
if (ip != free_ip)
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@ -2203,7 +2189,7 @@ xfs_iroot_realloc(
*/
if (ifp->if_broot_bytes == 0) {
new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
ifp->if_broot = kmem_alloc(new_size, KM_SLEEP);
ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
ifp->if_broot_bytes = (int)new_size;
return;
}
@ -2219,7 +2205,7 @@ xfs_iroot_realloc(
new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
KM_SLEEP);
KM_SLEEP | KM_NOFS);
op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@ -2245,7 +2231,7 @@ xfs_iroot_realloc(
else
new_size = 0;
if (new_size > 0) {
new_broot = kmem_alloc(new_size, KM_SLEEP);
new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
/*
* First copy over the btree block header.
*/
@ -2349,7 +2335,8 @@ xfs_idata_realloc(
real_size = roundup(new_size, 4);
if (ifp->if_u1.if_data == NULL) {
ASSERT(ifp->if_real_bytes == 0);
ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
ifp->if_u1.if_data = kmem_alloc(real_size,
KM_SLEEP | KM_NOFS);
} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
/*
* Only do the realloc if the underlying size
@ -2360,11 +2347,12 @@ xfs_idata_realloc(
kmem_realloc(ifp->if_u1.if_data,
real_size,
ifp->if_real_bytes,
KM_SLEEP);
KM_SLEEP | KM_NOFS);
}
} else {
ASSERT(ifp->if_real_bytes == 0);
ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
ifp->if_u1.if_data = kmem_alloc(real_size,
KM_SLEEP | KM_NOFS);
memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
ifp->if_bytes);
}
@ -2731,7 +2719,6 @@ cluster_corrupt_out:
* mark it as stale and brelse.
*/
if (XFS_BUF_IODONE_FUNC(bp)) {
XFS_BUF_CLR_BDSTRAT_FUNC(bp);
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_ERROR(bp,EIO);
@ -3069,8 +3056,7 @@ xfs_iflush_int(
* and unlock the inode's flush lock when the inode is
* completely written to disk.
*/
xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
xfs_iflush_done, (xfs_log_item_t *)iip);
xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
@ -3514,13 +3500,11 @@ xfs_iext_remove_indirect(
xfs_extnum_t ext_diff; /* extents to remove in current list */
xfs_extnum_t nex1; /* number of extents before idx */
xfs_extnum_t nex2; /* extents after idx + count */
int nlists; /* entries in indirection array */
int page_idx = idx; /* index in target extent list */
ASSERT(ifp->if_flags & XFS_IFEXTIREC);
erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
ASSERT(erp != NULL);
nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
nex1 = page_idx;
ext_cnt = count;
while (ext_cnt) {

View File

@ -443,8 +443,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
*/
int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
uint, uint, xfs_inode_t **);
void xfs_iput(xfs_inode_t *, uint);
void xfs_iput_new(xfs_inode_t *, uint);
void xfs_ilock(xfs_inode_t *, uint);
int xfs_ilock_nowait(xfs_inode_t *, uint);
void xfs_iunlock(xfs_inode_t *, uint);
@ -452,7 +450,7 @@ void xfs_ilock_demote(xfs_inode_t *, uint);
int xfs_isilocked(xfs_inode_t *, uint);
uint xfs_ilock_map_shared(xfs_inode_t *);
void xfs_iunlock_map_shared(xfs_inode_t *, uint);
void xfs_ireclaim(xfs_inode_t *);
void xfs_inode_free(struct xfs_inode *ip);
/*
* xfs_inode.c prototypes.

View File

@ -22,30 +22,26 @@
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_trace.h"
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_inode_log_item, ili_item);
}
/*
* This returns the number of iovecs needed to log the given inode item.
*
@ -55,13 +51,11 @@ kmem_zone_t *xfs_ili_zone; /* inode log item zone */
*/
STATIC uint
xfs_inode_item_size(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip)
{
uint nvecs;
xfs_inode_t *ip;
ip = iip->ili_inode;
nvecs = 2;
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
uint nvecs = 2;
/*
* Only log the data/extents/b-tree root if there is something
@ -212,21 +206,17 @@ xfs_inode_item_size(
*/
STATIC void
xfs_inode_item_format(
xfs_inode_log_item_t *iip,
xfs_log_iovec_t *log_vector)
struct xfs_log_item *lip,
struct xfs_log_iovec *vecp)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
uint nvecs;
xfs_log_iovec_t *vecp;
xfs_inode_t *ip;
size_t data_bytes;
xfs_bmbt_rec_t *ext_buffer;
int nrecs;
xfs_mount_t *mp;
ip = iip->ili_inode;
vecp = log_vector;
vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
vecp->i_addr = &iip->ili_format;
vecp->i_len = sizeof(xfs_inode_log_format_t);
vecp->i_type = XLOG_REG_TYPE_IFORMAT;
vecp++;
@ -277,7 +267,7 @@ xfs_inode_item_format(
*/
xfs_synchronize_times(ip);
vecp->i_addr = (xfs_caddr_t)&ip->i_d;
vecp->i_addr = &ip->i_d;
vecp->i_len = sizeof(struct xfs_icdinode);
vecp->i_type = XLOG_REG_TYPE_ICORE;
vecp++;
@ -323,18 +313,17 @@ xfs_inode_item_format(
ASSERT(ip->i_df.if_u1.if_extents != NULL);
ASSERT(ip->i_d.di_nextents > 0);
ASSERT(iip->ili_extents_buf == NULL);
nrecs = ip->i_df.if_bytes /
(uint)sizeof(xfs_bmbt_rec_t);
ASSERT(nrecs > 0);
ASSERT((ip->i_df.if_bytes /
(uint)sizeof(xfs_bmbt_rec_t)) > 0);
#ifdef XFS_NATIVE_HOST
if (nrecs == ip->i_d.di_nextents) {
if (ip->i_d.di_nextents == ip->i_df.if_bytes /
(uint)sizeof(xfs_bmbt_rec_t)) {
/*
* There are no delayed allocation
* extents, so just point to the
* real extents array.
*/
vecp->i_addr =
(char *)(ip->i_df.if_u1.if_extents);
vecp->i_addr = ip->i_df.if_u1.if_extents;
vecp->i_len = ip->i_df.if_bytes;
vecp->i_type = XLOG_REG_TYPE_IEXT;
} else
@ -352,7 +341,7 @@ xfs_inode_item_format(
ext_buffer = kmem_alloc(ip->i_df.if_bytes,
KM_SLEEP);
iip->ili_extents_buf = ext_buffer;
vecp->i_addr = (xfs_caddr_t)ext_buffer;
vecp->i_addr = ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_DATA_FORK);
vecp->i_type = XLOG_REG_TYPE_IEXT;
@ -371,7 +360,7 @@ xfs_inode_item_format(
if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
ASSERT(ip->i_df.if_broot_bytes > 0);
ASSERT(ip->i_df.if_broot != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
vecp->i_addr = ip->i_df.if_broot;
vecp->i_len = ip->i_df.if_broot_bytes;
vecp->i_type = XLOG_REG_TYPE_IBROOT;
vecp++;
@ -389,7 +378,7 @@ xfs_inode_item_format(
ASSERT(ip->i_df.if_u1.if_data != NULL);
ASSERT(ip->i_d.di_size > 0);
vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data;
vecp->i_addr = ip->i_df.if_u1.if_data;
/*
* Round i_bytes up to a word boundary.
* The underlying memory is guaranteed to
@ -437,7 +426,7 @@ xfs_inode_item_format(
* Assert that no attribute-related log flags are set.
*/
if (!XFS_IFORK_Q(ip)) {
ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
ASSERT(nvecs == lip->li_desc->lid_size);
iip->ili_format.ilf_size = nvecs;
ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
@ -449,21 +438,21 @@ xfs_inode_item_format(
ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
#ifdef DEBUG
int nrecs = ip->i_afp->if_bytes /
(uint)sizeof(xfs_bmbt_rec_t);
ASSERT(nrecs > 0);
ASSERT(nrecs == ip->i_d.di_anextents);
ASSERT(ip->i_afp->if_bytes > 0);
ASSERT(ip->i_afp->if_u1.if_extents != NULL);
ASSERT(ip->i_d.di_anextents > 0);
#ifdef DEBUG
nrecs = ip->i_afp->if_bytes /
(uint)sizeof(xfs_bmbt_rec_t);
#endif
ASSERT(nrecs > 0);
ASSERT(nrecs == ip->i_d.di_anextents);
#ifdef XFS_NATIVE_HOST
/*
* There are not delayed allocation extents
* for attributes, so just point at the array.
*/
vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents);
vecp->i_addr = ip->i_afp->if_u1.if_extents;
vecp->i_len = ip->i_afp->if_bytes;
#else
ASSERT(iip->ili_aextents_buf == NULL);
@ -473,7 +462,7 @@ xfs_inode_item_format(
ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
KM_SLEEP);
iip->ili_aextents_buf = ext_buffer;
vecp->i_addr = (xfs_caddr_t)ext_buffer;
vecp->i_addr = ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_ATTR_FORK);
#endif
@ -490,7 +479,7 @@ xfs_inode_item_format(
if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
ASSERT(ip->i_afp->if_broot_bytes > 0);
ASSERT(ip->i_afp->if_broot != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
vecp->i_addr = ip->i_afp->if_broot;
vecp->i_len = ip->i_afp->if_broot_bytes;
vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
vecp++;
@ -506,7 +495,7 @@ xfs_inode_item_format(
ASSERT(ip->i_afp->if_bytes > 0);
ASSERT(ip->i_afp->if_u1.if_data != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data;
vecp->i_addr = ip->i_afp->if_u1.if_data;
/*
* Round i_bytes up to a word boundary.
* The underlying memory is guaranteed to
@ -528,7 +517,7 @@ xfs_inode_item_format(
break;
}
ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
ASSERT(nvecs == lip->li_desc->lid_size);
iip->ili_format.ilf_size = nvecs;
}
@ -539,12 +528,14 @@ xfs_inode_item_format(
*/
STATIC void
xfs_inode_item_pin(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip)
{
ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
trace_xfs_inode_pin(iip->ili_inode, _RET_IP_);
atomic_inc(&iip->ili_inode->i_pincount);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
trace_xfs_inode_pin(ip, _RET_IP_);
atomic_inc(&ip->i_pincount);
}
@ -554,12 +545,12 @@ xfs_inode_item_pin(
*
* Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
*/
/* ARGSUSED */
STATIC void
xfs_inode_item_unpin(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip,
int remove)
{
struct xfs_inode *ip = iip->ili_inode;
struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
trace_xfs_inode_unpin(ip, _RET_IP_);
ASSERT(atomic_read(&ip->i_pincount) > 0);
@ -567,15 +558,6 @@ xfs_inode_item_unpin(
wake_up(&ip->i_ipin_wait);
}
/* ARGSUSED */
STATIC void
xfs_inode_item_unpin_remove(
xfs_inode_log_item_t *iip,
xfs_trans_t *tp)
{
xfs_inode_item_unpin(iip);
}
/*
* This is called to attempt to lock the inode associated with this
* inode log item, in preparation for the push routine which does the actual
@ -591,19 +573,16 @@ xfs_inode_item_unpin_remove(
*/
STATIC uint
xfs_inode_item_trylock(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip)
{
register xfs_inode_t *ip;
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
ip = iip->ili_inode;
if (xfs_ipincount(ip) > 0) {
if (xfs_ipincount(ip) > 0)
return XFS_ITEM_PINNED;
}
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
return XFS_ITEM_LOCKED;
}
if (!xfs_iflock_nowait(ip)) {
/*
@ -629,7 +608,7 @@ xfs_inode_item_trylock(
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
ASSERT(iip->ili_format.ilf_fields != 0);
ASSERT(iip->ili_logged == 0);
ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL);
ASSERT(lip->li_flags & XFS_LI_IN_AIL);
}
#endif
return XFS_ITEM_SUCCESS;
@ -643,26 +622,18 @@ xfs_inode_item_trylock(
*/
STATIC void
xfs_inode_item_unlock(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip)
{
uint hold;
uint iolocked;
uint lock_flags;
xfs_inode_t *ip;
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
unsigned short lock_flags;
ASSERT(iip != NULL);
ASSERT(iip->ili_inode->i_itemp != NULL);
ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
XFS_ILI_IOLOCKED_EXCL)) ||
xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL));
ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
XFS_ILI_IOLOCKED_SHARED)) ||
xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED));
/*
* Clear the transaction pointer in the inode.
*/
ip = iip->ili_inode;
ip->i_transp = NULL;
/*
@ -686,34 +657,11 @@ xfs_inode_item_unlock(
iip->ili_aextents_buf = NULL;
}
/*
* Figure out if we should unlock the inode or not.
*/
hold = iip->ili_flags & XFS_ILI_HOLD;
/*
* Before clearing out the flags, remember whether we
* are holding the inode's IO lock.
*/
iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY;
/*
* Clear out the fields of the inode log item particular
* to the current transaction.
*/
iip->ili_flags = 0;
/*
* Unlock the inode if XFS_ILI_HOLD was not set.
*/
if (!hold) {
lock_flags = XFS_ILOCK_EXCL;
if (iolocked & XFS_ILI_IOLOCKED_EXCL) {
lock_flags |= XFS_IOLOCK_EXCL;
} else if (iolocked & XFS_ILI_IOLOCKED_SHARED) {
lock_flags |= XFS_IOLOCK_SHARED;
}
xfs_iput(iip->ili_inode, lock_flags);
lock_flags = iip->ili_lock_flags;
iip->ili_lock_flags = 0;
if (lock_flags) {
xfs_iunlock(iip->ili_inode, lock_flags);
IRELE(iip->ili_inode);
}
}
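
Instead of the old XFS_ILI_HOLD / XFS_ILI_IOLOCKED_* flag dance, the join path now records in ili_lock_flags which locks the transaction owns; at unlock time the item drops exactly those locks plus the inode reference, or keeps the inode locked when the field is zero. A sketch of this record-at-join, consume-at-unlock pattern, with hypothetical types and flag values:

#include <stdio.h>

#define ILOCK_EXCL	0x1
#define IOLOCK_EXCL	0x2

struct inode_item {
	unsigned short lock_flags;	/* locks to drop at unlock time */
};

static void trans_ijoin(struct inode_item *iip, unsigned lock_flags)
{
	/* remember what the transaction owns; 0 means "caller keeps it" */
	iip->lock_flags = lock_flags;
}

static void item_unlock(struct inode_item *iip)
{
	unsigned lock_flags = iip->lock_flags;

	iip->lock_flags = 0;
	if (lock_flags)
		printf("dropping locks 0x%x and the inode reference\n",
		       lock_flags);
	else
		printf("transaction does not own the locks, keep inode\n");
}

int main(void)
{
	struct inode_item iip = { 0 };

	trans_ijoin(&iip, ILOCK_EXCL | IOLOCK_EXCL);
	item_unlock(&iip);
	return 0;
}
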
@ -725,13 +673,12 @@ xfs_inode_item_unlock(
* is the only one that matters. Therefore, simply return the
* given lsn.
*/
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_inode_item_committed(
xfs_inode_log_item_t *iip,
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
return (lsn);
return lsn;
}
/*
@ -743,13 +690,12 @@ xfs_inode_item_committed(
*/
STATIC void
xfs_inode_item_pushbuf(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip)
{
xfs_inode_t *ip;
xfs_mount_t *mp;
xfs_buf_t *bp;
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
struct xfs_buf *bp;
ip = iip->ili_inode;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
/*
@ -757,14 +703,13 @@ xfs_inode_item_pushbuf(
* inode was taken off the AIL. So, just get out.
*/
if (completion_done(&ip->i_flush) ||
((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
}
mp = ip->i_mount;
bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
iip->ili_format.ilf_len, XBF_TRYLOCK);
bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
iip->ili_format.ilf_len, XBF_TRYLOCK);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!bp)
@ -772,10 +717,8 @@ xfs_inode_item_pushbuf(
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
return;
}
/*
* This is called to asynchronously write the inode associated with this
* inode log item out to disk. The inode will already have been locked by
@ -783,14 +726,14 @@ xfs_inode_item_pushbuf(
*/
STATIC void
xfs_inode_item_push(
xfs_inode_log_item_t *iip)
struct xfs_log_item *lip)
{
xfs_inode_t *ip;
ip = iip->ili_inode;
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
ASSERT(!completion_done(&ip->i_flush));
/*
* Since we were able to lock the inode's flush lock and
* we found it on the AIL, the inode must be dirty. This
@ -813,43 +756,34 @@ xfs_inode_item_push(
*/
(void) xfs_iflush(ip, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
}
/*
* XXX rcc - this one really has to do something. Probably needs
* to stamp in a new field in the incore inode.
*/
/* ARGSUSED */
STATIC void
xfs_inode_item_committing(
xfs_inode_log_item_t *iip,
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
iip->ili_last_lsn = lsn;
return;
INODE_ITEM(lip)->ili_last_lsn = lsn;
}
/*
* This is the ops vector shared by all buf log items.
*/
static struct xfs_item_ops xfs_inode_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_inode_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_inode_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_inode_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_inode_item_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_inode_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push,
.iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_inode_item_committing
.iop_size = xfs_inode_item_size,
.iop_format = xfs_inode_item_format,
.iop_pin = xfs_inode_item_pin,
.iop_unpin = xfs_inode_item_unpin,
.iop_trylock = xfs_inode_item_trylock,
.iop_unlock = xfs_inode_item_unlock,
.iop_committed = xfs_inode_item_committed,
.iop_push = xfs_inode_item_push,
.iop_pushbuf = xfs_inode_item_pushbuf,
.iop_committing = xfs_inode_item_committing
};
@ -858,10 +792,10 @@ static struct xfs_item_ops xfs_inode_item_ops = {
*/
void
xfs_inode_item_init(
xfs_inode_t *ip,
xfs_mount_t *mp)
struct xfs_inode *ip,
struct xfs_mount *mp)
{
xfs_inode_log_item_t *iip;
struct xfs_inode_log_item *iip;
ASSERT(ip->i_itemp == NULL);
iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
@ -899,14 +833,14 @@ xfs_inode_item_destroy(
* from the AIL if it has not been re-logged, and unlocking the inode's
* flush lock.
*/
/*ARGSUSED*/
void
xfs_iflush_done(
xfs_buf_t *bp,
xfs_inode_log_item_t *iip)
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
xfs_inode_t *ip = iip->ili_inode;
struct xfs_ail *ailp = iip->ili_item.li_ailp;
struct xfs_ail *ailp = lip->li_ailp;
/*
* We only want to pull the item from the AIL if it is
@ -917,12 +851,11 @@ xfs_iflush_done(
* the lock since it's cheaper, and then we recheck while
* holding the lock before removing the inode from the AIL.
*/
if (iip->ili_logged &&
(iip->ili_item.li_lsn == iip->ili_flush_lsn)) {
if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
spin_lock(&ailp->xa_lock);
if (iip->ili_item.li_lsn == iip->ili_flush_lsn) {
if (lip->li_lsn == iip->ili_flush_lsn) {
/* xfs_trans_ail_delete() drops the AIL lock. */
xfs_trans_ail_delete(ailp, (xfs_log_item_t*)iip);
xfs_trans_ail_delete(ailp, lip);
} else {
spin_unlock(&ailp->xa_lock);
}
@ -940,8 +873,6 @@ xfs_iflush_done(
* Release the inode's flush lock since we're done with it.
*/
xfs_ifunlock(ip);
return;
}
/*
@ -957,10 +888,8 @@ xfs_iflush_abort(
xfs_inode_t *ip)
{
xfs_inode_log_item_t *iip = ip->i_itemp;
xfs_mount_t *mp;
iip = ip->i_itemp;
mp = ip->i_mount;
if (iip) {
struct xfs_ail *ailp = iip->ili_item.li_ailp;
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
@ -991,10 +920,10 @@ xfs_iflush_abort(
void
xfs_istale_done(
xfs_buf_t *bp,
xfs_inode_log_item_t *iip)
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
xfs_iflush_abort(iip->ili_inode);
xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
}
/*
@ -1007,9 +936,8 @@ xfs_inode_item_format_convert(
xfs_inode_log_format_t *in_f)
{
if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
xfs_inode_log_format_32_t *in_f32;
xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
in_f32 = (xfs_inode_log_format_32_t *)buf->i_addr;
in_f->ilf_type = in_f32->ilf_type;
in_f->ilf_size = in_f32->ilf_size;
in_f->ilf_fields = in_f32->ilf_fields;
@ -1025,9 +953,8 @@ xfs_inode_item_format_convert(
in_f->ilf_boffset = in_f32->ilf_boffset;
return 0;
} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
xfs_inode_log_format_64_t *in_f64;
xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
in_f64 = (xfs_inode_log_format_64_t *)buf->i_addr;
in_f->ilf_type = in_f64->ilf_type;
in_f->ilf_size = in_f64->ilf_size;
in_f->ilf_fields = in_f64->ilf_fields;

View File

@ -103,12 +103,6 @@ typedef struct xfs_inode_log_format_64 {
XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
XFS_ILOG_ABROOT)
#define XFS_ILI_HOLD 0x1
#define XFS_ILI_IOLOCKED_EXCL 0x2
#define XFS_ILI_IOLOCKED_SHARED 0x4
#define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED)
static inline int xfs_ilog_fbroot(int w)
{
return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT);
@ -137,7 +131,7 @@ typedef struct xfs_inode_log_item {
struct xfs_inode *ili_inode; /* inode ptr */
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
xfs_lsn_t ili_last_lsn; /* lsn at last transaction */
unsigned short ili_flags; /* misc flags */
unsigned short ili_lock_flags; /* lock flags */
unsigned short ili_logged; /* flushed logged data */
unsigned int ili_last_fields; /* fields when flushed */
struct xfs_bmbt_rec *ili_extents_buf; /* array of logged
@ -161,8 +155,8 @@ static inline int xfs_inode_clean(xfs_inode_t *ip)
extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *);
extern void xfs_istale_done(struct xfs_buf *, xfs_inode_log_item_t *);
extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_iflush_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
xfs_inode_log_format_t *);

View File

@ -23,19 +23,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
@ -123,7 +118,7 @@ xfs_iomap(
error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
bmapi_flags, NULL, 0, imap,
nimaps, NULL, NULL);
nimaps, NULL);
if (error)
goto out;
@ -138,7 +133,7 @@ xfs_iomap(
break;
}
if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
if (flags & BMAPI_DIRECT) {
error = xfs_iomap_write_direct(ip, offset, count, flags,
imap, nimaps);
} else {
@ -247,7 +242,7 @@ xfs_iomap_write_direct(
xfs_off_t offset,
size_t count,
int flags,
xfs_bmbt_irec_t *ret_imap,
xfs_bmbt_irec_t *imap,
int *nmaps)
{
xfs_mount_t *mp = ip->i_mount;
@ -261,7 +256,6 @@ xfs_iomap_write_direct(
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
xfs_bmap_free_t free_list;
uint qblocks, resblks, resrtextents;
int committed;
@ -285,10 +279,10 @@ xfs_iomap_write_direct(
if (error)
goto error_out;
} else {
if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK))
if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
ret_imap->br_blockcount +
ret_imap->br_startoff);
imap->br_blockcount +
imap->br_startoff);
}
count_fsb = last_fsb - offset_fsb;
ASSERT(count_fsb > 0);
@ -334,20 +328,22 @@ xfs_iomap_write_direct(
if (error)
goto error1;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
bmapi_flag = XFS_BMAPI_WRITE;
if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
* Issue the xfs_bmapi() call to allocate the blocks
* Issue the xfs_bmapi() call to allocate the blocks.
*
* From this point onwards we overwrite the imap pointer that the
* caller gave to us.
*/
xfs_bmap_init(&free_list, &firstfsb);
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag,
&firstfsb, 0, &imap, &nimaps, &free_list, NULL);
&firstfsb, 0, imap, &nimaps, &free_list);
if (error)
goto error0;
@ -369,12 +365,11 @@ xfs_iomap_write_direct(
goto error_out;
}
if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) {
error = xfs_cmn_err_fsblock_zero(ip, &imap);
if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) {
error = xfs_cmn_err_fsblock_zero(ip, imap);
goto error_out;
}
*ret_imap = imap;
*nmaps = 1;
return 0;
@ -425,7 +420,7 @@ xfs_iomap_eof_want_preallocate(
imaps = nimaps;
firstblock = NULLFSBLOCK;
error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0,
&firstblock, 0, imap, &imaps, NULL, NULL);
&firstblock, 0, imap, &imaps, NULL);
if (error)
return error;
for (n = 0; n < imaps; n++) {
@ -500,7 +495,7 @@ retry:
(xfs_filblks_t)(last_fsb - offset_fsb),
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
&nimaps, NULL, NULL);
&nimaps, NULL);
if (error && (error != ENOSPC))
return XFS_ERROR(error);
@ -548,7 +543,7 @@ xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
xfs_bmbt_irec_t *map,
xfs_bmbt_irec_t *imap,
int *retmap)
{
xfs_mount_t *mp = ip->i_mount;
@ -557,7 +552,6 @@ xfs_iomap_write_allocate(
xfs_fsblock_t first_block;
xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb;
xfs_bmbt_irec_t imap;
xfs_trans_t *tp;
int nimaps, committed;
int error = 0;
@ -573,8 +567,8 @@ xfs_iomap_write_allocate(
return XFS_ERROR(error);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
count_fsb = map->br_blockcount;
map_start_fsb = map->br_startoff;
count_fsb = imap->br_blockcount;
map_start_fsb = imap->br_startoff;
XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
@ -602,8 +596,7 @@ xfs_iomap_write_allocate(
return XFS_ERROR(error);
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_bmap_init(&free_list, &first_block);
@ -654,10 +647,15 @@ xfs_iomap_write_allocate(
}
}
/* Go get the actual blocks */
/*
* Go get the actual blocks.
*
* From this point onwards we overwrite the imap
* pointer that the caller gave to us.
*/
error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
XFS_BMAPI_WRITE, &first_block, 1,
&imap, &nimaps, &free_list, NULL);
imap, &nimaps, &free_list);
if (error)
goto trans_cancel;
@ -676,13 +674,12 @@ xfs_iomap_write_allocate(
* See if we were able to allocate an extent that
* covers at least part of the callers request
*/
if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
return xfs_cmn_err_fsblock_zero(ip, &imap);
if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
return xfs_cmn_err_fsblock_zero(ip, imap);
if ((offset_fsb >= imap.br_startoff) &&
(offset_fsb < (imap.br_startoff +
imap.br_blockcount))) {
*map = imap;
if ((offset_fsb >= imap->br_startoff) &&
(offset_fsb < (imap->br_startoff +
imap->br_blockcount))) {
*retmap = 1;
XFS_STATS_INC(xs_xstrat_quick);
return 0;
@ -692,8 +689,8 @@ xfs_iomap_write_allocate(
* So far we have not mapped the requested part of the
* file, just surrounding data, try again.
*/
count_fsb -= imap.br_blockcount;
map_start_fsb = imap.br_startoff + imap.br_blockcount;
count_fsb -= imap->br_blockcount;
map_start_fsb = imap->br_startoff + imap->br_blockcount;
}
trans_cancel:
@ -766,8 +763,7 @@ xfs_iomap_write_unwritten(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
/*
* Modify the unwritten extent state of the buffer.
@ -776,7 +772,7 @@ xfs_iomap_write_unwritten(
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
1, &imap, &nimaps, &free_list, NULL);
1, &imap, &nimaps, &free_list);
if (error)
goto error_on_bmapi_transaction;

View File

@ -18,17 +18,16 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
typedef enum {
/* base extent manipulation calls */
BMAPI_READ = (1 << 0), /* read extents */
BMAPI_WRITE = (1 << 1), /* create extents */
BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */
/* modifiers */
BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */
BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */
BMAPI_MMAP = (1 << 6), /* allocate for mmap write */
BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
} bmapi_flags_t;
/* base extent manipulation calls */
#define BMAPI_READ (1 << 0) /* read extents */
#define BMAPI_WRITE (1 << 1) /* create extents */
#define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */
/* modifiers */
#define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */
#define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */
#define BMAPI_MMAP (1 << 6) /* allocate for mmap write */
#define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */
#define BMAPI_FLAGS \
{ BMAPI_READ, "READ" }, \
@ -36,7 +35,6 @@ typedef enum {
{ BMAPI_ALLOCATE, "ALLOCATE" }, \
{ BMAPI_IGNSTATE, "IGNSTATE" }, \
{ BMAPI_DIRECT, "DIRECT" }, \
{ BMAPI_MMAP, "MMAP" }, \
{ BMAPI_TRYLOCK, "TRYLOCK" }
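
The flags move from an enum to plain #defines, and BMAPI_FLAGS pairs each bit with a printable name for the tracing code. The same flag-to-name idea in standalone C (hypothetical decoder, subset of the flags):

#include <stdio.h>

#define BMAPI_READ	(1 << 0)
#define BMAPI_WRITE	(1 << 1)
#define BMAPI_TRYLOCK	(1 << 7)

static const struct { unsigned mask; const char *name; } bmapi_names[] = {
	{ BMAPI_READ,		"READ"		},
	{ BMAPI_WRITE,		"WRITE"		},
	{ BMAPI_TRYLOCK,	"TRYLOCK"	},
};

/* print the symbolic names of the bits set in flags */
static void print_bmapi_flags(unsigned flags)
{
	for (size_t i = 0; i < sizeof(bmapi_names) / sizeof(bmapi_names[0]); i++)
		if (flags & bmapi_names[i].mask)
			printf("%s ", bmapi_names[i].name);
	printf("\n");
}

int main(void)
{
	print_bmapi_flags(BMAPI_WRITE | BMAPI_TRYLOCK);
	return 0;
}
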
struct xfs_inode;

View File

@ -24,20 +24,17 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
STATIC int
xfs_internal_inum(
@ -143,7 +140,8 @@ xfs_bulkstat_one_int(
buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
break;
}
xfs_iput(ip, XFS_ILOCK_SHARED);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
IRELE(ip);
error = formatter(buffer, ubsize, ubused, buf);

View File

@ -24,8 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
@ -35,8 +33,6 @@
#include "xfs_ialloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_rw.h"
@ -337,7 +333,6 @@ xfs_log_reserve(
int retval = 0;
ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
ASSERT((flags & XFS_LOG_NOSLEEP) == 0);
if (XLOG_FORCED_SHUTDOWN(log))
return XFS_ERROR(EIO);
@ -552,7 +547,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
.magic = XLOG_UNMOUNT_TYPE,
};
struct xfs_log_iovec reg = {
.i_addr = (void *)&magic,
.i_addr = &magic,
.i_len = sizeof(magic),
.i_type = XLOG_REG_TYPE_UNMOUNT,
};
@ -1047,7 +1042,6 @@ xlog_alloc_log(xfs_mount_t *mp,
xlog_in_core_t *iclog, *prev_iclog=NULL;
xfs_buf_t *bp;
int i;
int iclogsize;
int error = ENOMEM;
uint log2_size = 0;
@ -1127,7 +1121,6 @@ xlog_alloc_log(xfs_mount_t *mp,
* with different amounts of memory. See the definition of
* xlog_in_core_t in xfs_log_priv.h for details.
*/
iclogsize = log->l_iclog_size;
ASSERT(log->l_iclog_size >= 4096);
for (i=0; i < log->l_iclog_bufs; i++) {
*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
@ -1428,11 +1421,8 @@ xlog_sync(xlog_t *log,
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_LOG_BUFFER;
/*
* Do an ordered write for the log block.
* Its unnecessary to flush the first split block in the log wrap case.
*/
if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER))
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
XFS_BUF_ORDERED(bp);
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);

View File

@ -55,14 +55,10 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
/*
* Flags to xfs_log_reserve()
*
* XFS_LOG_SLEEP: If space is not available, sleep (default)
* XFS_LOG_NOSLEEP: If space is not available, return error
* XFS_LOG_PERM_RESERV: Permanent reservation. When writes are
* performed against this type of reservation, the reservation
* is not decreased. Long running transactions should use this.
*/
#define XFS_LOG_SLEEP 0x0
#define XFS_LOG_NOSLEEP 0x1
#define XFS_LOG_PERM_RESERV 0x2
/*
@ -104,7 +100,7 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
#define XLOG_REG_TYPE_MAX 19
typedef struct xfs_log_iovec {
xfs_caddr_t i_addr; /* beginning address of region */
void *i_addr; /* beginning address of region */
int i_len; /* length in bytes of region */
uint i_type; /* type of region */
} xfs_log_iovec_t;
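Changing i_addr from xfs_caddr_t (a char pointer) to void * is what lets every cast in the recovery hunks below disappear: C converts a void * to any object-pointer type implicitly, so the assignments type-check without them. Before and after, taken from the recovery code:

dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);	/* old: cast required */
dicp = item->ri_buf[1].i_addr;				/* new: implicit from void * */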
@ -201,9 +197,4 @@ int xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
#endif
extern int xlog_debug; /* set to 1 to enable real log */
#endif /* __XFS_LOG_H__ */

View File

@ -26,8 +26,6 @@
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
@ -554,7 +552,7 @@ xlog_cil_push(
thdr.th_type = XFS_TRANS_CHECKPOINT;
thdr.th_tid = tic->t_tid;
thdr.th_num_items = num_iovecs;
lhdr.i_addr = (xfs_caddr_t)&thdr;
lhdr.i_addr = &thdr;
lhdr.i_len = sizeof(xfs_trans_header_t);
lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

View File

@ -24,15 +24,11 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@ -1565,9 +1561,7 @@ xlog_recover_reorder_trans(
list_splice_init(&trans->r_itemq, &sort_list);
list_for_each_entry_safe(item, n, &sort_list, ri_list) {
xfs_buf_log_format_t *buf_f;
buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
@ -1892,9 +1886,8 @@ xlog_recover_do_inode_buffer(
* current di_next_unlinked field. Extract its value
* and copy it to the buffer copy.
*/
logged_nextp = (xfs_agino_t *)
((char *)(item->ri_buf[item_index].i_addr) +
(next_unlinked_offset - reg_buf_offset));
logged_nextp = item->ri_buf[item_index].i_addr +
next_unlinked_offset - reg_buf_offset;
if (unlikely(*logged_nextp == 0)) {
xfs_fs_cmn_err(CE_ALERT, mp,
"bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
@ -1973,8 +1966,7 @@ xlog_recover_do_reg_buffer(
item->ri_buf[i].i_len, __func__);
goto next;
}
error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
item->ri_buf[i].i_addr,
error = xfs_qm_dqcheck(item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
if (error)
@ -2187,7 +2179,7 @@ xlog_recover_do_buffer_trans(
xlog_recover_item_t *item,
int pass)
{
xfs_buf_log_format_t *buf_f;
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
xfs_mount_t *mp;
xfs_buf_t *bp;
int error;
@ -2197,8 +2189,6 @@ xlog_recover_do_buffer_trans(
ushort flags;
uint buf_flags;
buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
if (pass == XLOG_RECOVER_PASS1) {
/*
* In this pass we're only looking for buf items
@ -2319,10 +2309,9 @@ xlog_recover_do_inode_trans(
}
if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
in_f = item->ri_buf[0].i_addr;
} else {
in_f = (xfs_inode_log_format_t *)kmem_alloc(
sizeof(xfs_inode_log_format_t), KM_SLEEP);
in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
need_free = 1;
error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
if (error)
@ -2370,7 +2359,7 @@ xlog_recover_do_inode_trans(
error = EFSCORRUPTED;
goto error;
}
dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
dicp = item->ri_buf[1].i_addr;
if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
@ -2461,7 +2450,7 @@ xlog_recover_do_inode_trans(
}
/* The core is in in-core format */
xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
/* the rest is in on-disk format */
if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
@ -2578,7 +2567,7 @@ xlog_recover_do_quotaoff_trans(
return (0);
}
qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
qoff_f = item->ri_buf[0].i_addr;
ASSERT(qoff_f);
/*
@ -2622,9 +2611,8 @@ xlog_recover_do_dquot_trans(
if (mp->m_qflags == 0)
return (0);
recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
if (item->ri_buf[1].i_addr == NULL) {
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL) {
cmn_err(CE_ALERT,
"XFS: NULL dquot in %s.", __func__);
return XFS_ERROR(EIO);
@ -2654,7 +2642,7 @@ xlog_recover_do_dquot_trans(
* The other possibility, of course, is that the quota subsystem was
* removed since the last mount - ENOSYS.
*/
dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
if ((error = xfs_qm_dqcheck(recddq,
dq_f->qlf_id,
@ -2721,7 +2709,7 @@ xlog_recover_do_efi_trans(
return 0;
}
efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
efi_formatp = item->ri_buf[0].i_addr;
mp = log->l_mp;
efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
@ -2767,7 +2755,7 @@ xlog_recover_do_efd_trans(
return;
}
efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
efd_formatp = item->ri_buf[0].i_addr;
ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
(item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +

View File

@ -25,13 +25,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"

View File

@ -66,65 +66,6 @@ struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
/*
* Prototypes and functions for the Data Migration subsystem.
*/
typedef int (*xfs_send_data_t)(int, struct xfs_inode *,
xfs_off_t, size_t, int, int *);
typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
struct xfs_inode *, dm_right_t,
struct xfs_inode *, dm_right_t,
const unsigned char *, const unsigned char *,
mode_t, int, int);
typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
char *, char *);
typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
dm_right_t, mode_t, int, int);
typedef struct xfs_dmops {
xfs_send_data_t xfs_send_data;
xfs_send_mmap_t xfs_send_mmap;
xfs_send_destroy_t xfs_send_destroy;
xfs_send_namesp_t xfs_send_namesp;
xfs_send_mount_t xfs_send_mount;
xfs_send_unmount_t xfs_send_unmount;
} xfs_dmops_t;
#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \
(((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED)
#define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
(*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
#define XFS_SEND_MMAP(mp, vma,fl) \
(*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl)
#define XFS_SEND_DESTROY(mp, ip,right) \
(*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
(*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
#define XFS_SEND_MOUNT(mp,right,path,name) \
(*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)
#define XFS_SEND_PREUNMOUNT(mp) \
do { \
if (mp->m_flags & XFS_MOUNT_DMAPI) { \
(*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \
(mp)->m_rootip, DM_RIGHT_NULL, \
(mp)->m_rootip, DM_RIGHT_NULL, \
NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
} \
} while (0)
#define XFS_SEND_UNMOUNT(mp) \
do { \
if (mp->m_flags & XFS_MOUNT_DMAPI) { \
(*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \
DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
} \
} while (0)
#ifdef HAVE_PERCPU_SB
/*
@ -241,8 +182,6 @@ typedef struct xfs_mount {
uint m_chsize; /* size of next field */
struct xfs_chash *m_chash; /* fs private inode per-cluster
* hash table */
struct xfs_dmops *m_dm_ops; /* vector of DMI ops */
struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
atomic_t m_active_trans; /* number trans frozen */
#ifdef HAVE_PERCPU_SB
xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
@ -269,7 +208,6 @@ typedef struct xfs_mount {
must be synchronous except
for space allocations */
#define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */
#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
operations, typically for
@ -282,8 +220,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
#define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */
#define XFS_MOUNT_OSYNCISOSYNC (1ULL << 13) /* o_sync is REALLY o_sync */
/* osyncisdsync is now default*/
#define XFS_MOUNT_32BITINODES (1ULL << 14) /* do not create inodes above
* 32 bits in size */
#define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* users wants 32bit inodes */
@ -440,11 +376,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
extern int xfs_dmops_get(struct xfs_mount *);
extern void xfs_dmops_put(struct xfs_mount *);
extern struct xfs_dmops xfs_dmcore_xfs;
#endif /* __KERNEL__ */
extern void xfs_mod_sb(struct xfs_trans *, __int64_t);

View File

@ -24,12 +24,9 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@ -116,20 +113,7 @@ xfs_rename(
int spaceres;
int num_inodes;
xfs_itrace_entry(src_dp);
xfs_itrace_entry(target_dp);
if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) ||
DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
src_dp, DM_RIGHT_NULL,
target_dp, DM_RIGHT_NULL,
src_name->name, target_name->name,
0, 0, 0);
if (error)
return error;
}
/* Return through std_return after this point. */
trace_xfs_rename(src_dp, target_dp, src_name, target_name);
new_parent = (src_dp != target_dp);
src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
@ -184,26 +168,14 @@ xfs_rename(
/*
* Join all the inodes to the transaction. From this point on,
* we can rely on either trans_commit or trans_cancel to unlock
* them. Note that we need to add a vnode reference to the
* directories since trans_commit & trans_cancel will decrement
* them when they unlock the inodes. Also, we need to be careful
* not to add an inode to the transaction more than once.
* them.
*/
IHOLD(src_dp);
xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
if (new_parent) {
IHOLD(target_dp);
xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
}
IHOLD(src_ip);
xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
if (target_ip) {
IHOLD(target_ip);
xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
}
xfs_trans_ijoin_ref(tp, src_dp, XFS_ILOCK_EXCL);
if (new_parent)
xfs_trans_ijoin_ref(tp, target_dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, src_ip, XFS_ILOCK_EXCL);
if (target_ip)
xfs_trans_ijoin_ref(tp, target_ip, XFS_ILOCK_EXCL);
/*
* If we are using project inheritance, we only allow renames
@ -369,26 +341,13 @@ xfs_rename(
* trans_commit will unlock src_ip, target_ip & decrement
* the vnode references.
*/
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
/* Fall through to std_return with error = 0 or errno from
* xfs_trans_commit */
std_return:
if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) ||
DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) {
(void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
src_dp, DM_RIGHT_NULL,
target_dp, DM_RIGHT_NULL,
src_name->name, target_name->name,
0, error, 0);
}
return error;
return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
error_return:
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
std_return:
return error;
}

View File

@ -25,17 +25,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
@ -129,7 +122,7 @@ xfs_growfs_rt_alloc(
cancelflags |= XFS_TRANS_ABORT;
error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock,
resblks, &map, &nmap, &flist, NULL);
resblks, &map, &nmap, &flist);
if (!error && nmap < 1)
error = XFS_ERROR(ENOSPC);
if (error)

View File

@ -24,27 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_rw.h"
#include "xfs_trace.h"
/*
* Force a shutdown of the filesystem instantly while keeping

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* Copyright (C) 2010 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@ -24,16 +25,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@ -47,6 +44,7 @@
#include "xfs_trace.h"
kmem_zone_t *xfs_trans_zone;
kmem_zone_t *xfs_log_item_desc_zone;
/*
@ -597,8 +595,7 @@ _xfs_trans_alloc(
tp->t_magic = XFS_TRANS_MAGIC;
tp->t_type = type;
tp->t_mountp = mp;
tp->t_items_free = XFS_LIC_NUM_SLOTS;
xfs_lic_init(&(tp->t_items));
INIT_LIST_HEAD(&tp->t_items);
INIT_LIST_HEAD(&tp->t_busy);
return tp;
}
@ -643,8 +640,7 @@ xfs_trans_dup(
ntp->t_magic = XFS_TRANS_MAGIC;
ntp->t_type = tp->t_type;
ntp->t_mountp = tp->t_mountp;
ntp->t_items_free = XFS_LIC_NUM_SLOTS;
xfs_lic_init(&(ntp->t_items));
INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy);
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
@ -1123,6 +1119,108 @@ xfs_trans_unreserve_and_mod_sb(
}
}
/*
* Add the given log item to the transaction's list of log items.
*
* The log item will now point to its new descriptor with its li_desc field.
*/
void
xfs_trans_add_item(
struct xfs_trans *tp,
struct xfs_log_item *lip)
{
struct xfs_log_item_desc *lidp;
ASSERT(lip->li_mountp == tp->t_mountp);
ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
lidp->lid_item = lip;
lidp->lid_flags = 0;
lidp->lid_size = 0;
list_add_tail(&lidp->lid_trans, &tp->t_items);
lip->li_desc = lidp;
}
STATIC void
xfs_trans_free_item_desc(
struct xfs_log_item_desc *lidp)
{
list_del_init(&lidp->lid_trans);
kmem_zone_free(xfs_log_item_desc_zone, lidp);
}
/*
* Unlink and free the given descriptor.
*/
void
xfs_trans_del_item(
struct xfs_log_item *lip)
{
xfs_trans_free_item_desc(lip->li_desc);
lip->li_desc = NULL;
}
/*
* Unlock all of the items of a transaction and free all the descriptors
* of that transaction.
*/
STATIC void
xfs_trans_free_items(
struct xfs_trans *tp,
xfs_lsn_t commit_lsn,
int flags)
{
struct xfs_log_item_desc *lidp, *next;
list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
struct xfs_log_item *lip = lidp->lid_item;
lip->li_desc = NULL;
if (commit_lsn != NULLCOMMITLSN)
IOP_COMMITTING(lip, commit_lsn);
if (flags & XFS_TRANS_ABORT)
lip->li_flags |= XFS_LI_ABORTED;
IOP_UNLOCK(lip);
xfs_trans_free_item_desc(lidp);
}
}
/*
* Unlock the items associated with a transaction.
*
* Items which were not logged should be freed. Those which were logged must
* still be tracked so they can be unpinned when the transaction commits.
*/
STATIC void
xfs_trans_unlock_items(
struct xfs_trans *tp,
xfs_lsn_t commit_lsn)
{
struct xfs_log_item_desc *lidp, *next;
list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
struct xfs_log_item *lip = lidp->lid_item;
lip->li_desc = NULL;
if (commit_lsn != NULLCOMMITLSN)
IOP_COMMITTING(lip, commit_lsn);
IOP_UNLOCK(lip);
/*
* Free the descriptor if the item is not dirty
* within this transaction.
*/
if (!(lidp->lid_flags & XFS_LID_DIRTY))
xfs_trans_free_item_desc(lidp);
}
}
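Both helpers above delete and free descriptors while walking t_items, hence list_for_each_entry_safe(): it samples the next entry before the loop body runs, so freeing the current one cannot break the traversal. The bare pattern, as a minimal sketch assuming only <linux/list.h> and a hypothetical structure:

struct foo {
	struct list_head	list;
};

static void
foo_free_all(struct list_head *head)
{
	struct foo	*cur, *next;

	/* 'next' is fetched before 'cur' can be freed, keeping the walk valid */
	list_for_each_entry_safe(cur, next, head, list) {
		list_del(&cur->list);
		kfree(cur);
	}
}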
/*
* Total up the number of log iovecs needed to commit this
* transaction. The transaction itself needs one for the
@ -1134,30 +1232,27 @@ xfs_trans_count_vecs(
struct xfs_trans *tp)
{
int nvecs;
xfs_log_item_desc_t *lidp;
struct xfs_log_item_desc *lidp;
nvecs = 1;
lidp = xfs_trans_first_item(tp);
ASSERT(lidp != NULL);
/* In the non-debug case we need to start bailing out if we
* didn't find a log_item here; return zero and let trans_commit
* deal with it.
*/
if (lidp == NULL)
if (list_empty(&tp->t_items)) {
ASSERT(0);
return 0;
}
while (lidp != NULL) {
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
/*
* Skip items which aren't dirty in this transaction.
*/
if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
lidp = xfs_trans_next_item(tp, lidp);
if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
}
lidp->lid_size = IOP_SIZE(lidp->lid_item);
nvecs += lidp->lid_size;
lidp = xfs_trans_next_item(tp, lidp);
}
return nvecs;
@ -1177,7 +1272,7 @@ xfs_trans_fill_vecs(
struct xfs_trans *tp,
struct xfs_log_iovec *log_vector)
{
xfs_log_item_desc_t *lidp;
struct xfs_log_item_desc *lidp;
struct xfs_log_iovec *vecp;
uint nitems;
@ -1188,14 +1283,11 @@ xfs_trans_fill_vecs(
vecp = log_vector + 1;
nitems = 0;
lidp = xfs_trans_first_item(tp);
ASSERT(lidp);
while (lidp) {
ASSERT(!list_empty(&tp->t_items));
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
/* Skip items which aren't dirty in this transaction. */
if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
lidp = xfs_trans_next_item(tp, lidp);
if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
}
/*
* The item may be marked dirty but not log anything. This can
@ -1206,7 +1298,6 @@ xfs_trans_fill_vecs(
IOP_FORMAT(lidp->lid_item, vecp);
vecp += lidp->lid_size;
IOP_PIN(lidp->lid_item);
lidp = xfs_trans_next_item(tp, lidp);
}
/*
@ -1284,7 +1375,7 @@ xfs_trans_item_committed(
* log item flags, if anyone else stales the buffer we do not want to
* pay any attention to it.
*/
IOP_UNPIN(lip);
IOP_UNPIN(lip, 0);
}
/*
@ -1301,24 +1392,15 @@ xfs_trans_committed(
struct xfs_trans *tp,
int abortflag)
{
xfs_log_item_desc_t *lidp;
xfs_log_item_chunk_t *licp;
xfs_log_item_chunk_t *next_licp;
struct xfs_log_item_desc *lidp, *next;
/* Call the transaction's completion callback if there is one. */
if (tp->t_callback != NULL)
tp->t_callback(tp, tp->t_callarg);
for (lidp = xfs_trans_first_item(tp);
lidp != NULL;
lidp = xfs_trans_next_item(tp, lidp)) {
list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
}
/* free the item chunks, ignoring the embedded chunk */
for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) {
next_licp = licp->lic_next;
kmem_free(licp);
xfs_trans_free_item_desc(lidp);
}
xfs_trans_free(tp);
@ -1333,16 +1415,14 @@ xfs_trans_uncommit(
struct xfs_trans *tp,
uint flags)
{
xfs_log_item_desc_t *lidp;
struct xfs_log_item_desc *lidp;
for (lidp = xfs_trans_first_item(tp);
lidp != NULL;
lidp = xfs_trans_next_item(tp, lidp)) {
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
/*
* Unpin all but those that aren't dirty.
*/
if (lidp->lid_flags & XFS_LID_DIRTY)
IOP_UNPIN_REMOVE(lidp->lid_item, tp);
IOP_UNPIN(lidp->lid_item, 1);
}
xfs_trans_unreserve_and_mod_sb(tp);
@ -1508,33 +1588,28 @@ STATIC struct xfs_log_vec *
xfs_trans_alloc_log_vecs(
xfs_trans_t *tp)
{
xfs_log_item_desc_t *lidp;
struct xfs_log_item_desc *lidp;
struct xfs_log_vec *lv = NULL;
struct xfs_log_vec *ret_lv = NULL;
lidp = xfs_trans_first_item(tp);
/* Bail out if we didn't find a log item. */
if (!lidp) {
if (list_empty(&tp->t_items)) {
ASSERT(0);
return NULL;
}
while (lidp != NULL) {
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
struct xfs_log_vec *new_lv;
/* Skip items which aren't dirty in this transaction. */
if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
lidp = xfs_trans_next_item(tp, lidp);
if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
}
/* Skip items that do not have any vectors for writing */
lidp->lid_size = IOP_SIZE(lidp->lid_item);
if (!lidp->lid_size) {
lidp = xfs_trans_next_item(tp, lidp);
if (!lidp->lid_size)
continue;
}
new_lv = kmem_zalloc(sizeof(*new_lv) +
lidp->lid_size * sizeof(struct xfs_log_iovec),
@ -1549,7 +1624,6 @@ xfs_trans_alloc_log_vecs(
else
lv->lv_next = new_lv;
lv = new_lv;
lidp = xfs_trans_next_item(tp, lidp);
}
return ret_lv;
@ -1708,12 +1782,6 @@ xfs_trans_cancel(
int flags)
{
int log_flags;
#ifdef DEBUG
xfs_log_item_chunk_t *licp;
xfs_log_item_desc_t *lidp;
xfs_log_item_t *lip;
int i;
#endif
xfs_mount_t *mp = tp->t_mountp;
/*
@ -1732,21 +1800,11 @@ xfs_trans_cancel(
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
#ifdef DEBUG
if (!(flags & XFS_TRANS_ABORT)) {
licp = &(tp->t_items);
while (licp != NULL) {
lidp = licp->lic_descs;
for (i = 0; i < licp->lic_unused; i++, lidp++) {
if (xfs_lic_isfree(licp, i)) {
continue;
}
if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
struct xfs_log_item_desc *lidp;
lip = lidp->lid_item;
if (!XFS_FORCED_SHUTDOWN(mp))
ASSERT(!(lip->li_type == XFS_LI_EFD));
}
licp = licp->lic_next;
}
list_for_each_entry(lidp, &tp->t_items, lid_trans)
ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
}
#endif
xfs_trans_unreserve_and_mod_sb(tp);
@ -1834,7 +1892,6 @@ xfs_trans_roll(
if (error)
return error;
xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(trans, dp);
xfs_trans_ijoin(trans, dp);
return 0;
}

View File

@ -161,105 +161,14 @@ typedef struct xfs_trans_header {
* the amount of space needed to log the item it describes
* once we get to commit processing (see xfs_trans_commit()).
*/
typedef struct xfs_log_item_desc {
struct xfs_log_item_desc {
struct xfs_log_item *lid_item;
ushort lid_size;
unsigned char lid_flags;
unsigned char lid_index;
} xfs_log_item_desc_t;
ushort lid_size;
unsigned char lid_flags;
struct list_head lid_trans;
};
#define XFS_LID_DIRTY 0x1
#define XFS_LID_PINNED 0x2
/*
* This structure is used to maintain a chunk list of log_item_desc
* structures. The free field is a bitmask indicating which descriptors
* in this chunk's array are free. The unused field is the first value
* not used since this chunk was allocated.
*/
#define XFS_LIC_NUM_SLOTS 15
typedef struct xfs_log_item_chunk {
struct xfs_log_item_chunk *lic_next;
ushort lic_free;
ushort lic_unused;
xfs_log_item_desc_t lic_descs[XFS_LIC_NUM_SLOTS];
} xfs_log_item_chunk_t;
#define XFS_LIC_MAX_SLOT (XFS_LIC_NUM_SLOTS - 1)
#define XFS_LIC_FREEMASK ((1 << XFS_LIC_NUM_SLOTS) - 1)
/*
* Initialize the given chunk. Set the chunk's free descriptor mask
* to indicate that all descriptors are free. The caller gets to set
* lic_unused to the right value (0 matches all free). The
* lic_descs.lid_index values are set up as each desc is allocated.
*/
static inline void xfs_lic_init(xfs_log_item_chunk_t *cp)
{
cp->lic_free = XFS_LIC_FREEMASK;
}
static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
{
cp->lic_descs[slot].lid_index = (unsigned char)(slot);
}
static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
{
return cp->lic_free & XFS_LIC_FREEMASK;
}
static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp)
{
cp->lic_free = XFS_LIC_FREEMASK;
}
static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
{
return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK);
}
static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
{
return (cp->lic_free & (1 << slot));
}
static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
{
cp->lic_free &= ~(1 << slot);
}
static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
{
cp->lic_free |= 1 << slot;
}
static inline xfs_log_item_desc_t *
xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
{
return &(cp->lic_descs[slot]);
}
static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
{
return (uint)dp->lid_index;
}
/*
* Calculate the address of a chunk given a descriptor pointer:
* dp - dp->lid_index gives the address of the start of the lic_descs array.
* From this we subtract the offset of the lic_descs field in a chunk.
* All of this yields the address of the chunk, which is
* cast to a chunk pointer.
*/
static inline xfs_log_item_chunk_t *
xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
{
return (xfs_log_item_chunk_t*) \
(((xfs_caddr_t)((dp) - (dp)->lid_index)) - \
(xfs_caddr_t)(((xfs_log_item_chunk_t*)0)->lic_descs));
}
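The arithmetic above is an open-coded container_of(): dp - dp->lid_index backs up to lic_descs[0], and subtracting the offset of lic_descs within the chunk yields the chunk itself. The same computation written with offsetof(), equivalent to the removed helper:

static inline xfs_log_item_chunk_t *
xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
{
	/* step back to lic_descs[0], then strip the field offset */
	return (xfs_log_item_chunk_t *)((char *)(dp - dp->lid_index) -
			offsetof(xfs_log_item_chunk_t, lic_descs));
}

With descriptors now on a per-transaction list and reachable through li_desc, none of this pointer arithmetic is needed.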
#define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */
/*
@ -275,8 +184,6 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
/*
* Values for call flags parameter.
*/
#define XFS_TRANS_NOSLEEP 0x1
#define XFS_TRANS_WAIT 0x2
#define XFS_TRANS_RELEASE_LOG_RES 0x4
#define XFS_TRANS_ABORT 0x8
@ -438,8 +345,7 @@ typedef struct xfs_item_ops {
uint (*iop_size)(xfs_log_item_t *);
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
void (*iop_pin)(xfs_log_item_t *);
void (*iop_unpin)(xfs_log_item_t *);
void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
void (*iop_unpin)(xfs_log_item_t *, int remove);
uint (*iop_trylock)(xfs_log_item_t *);
void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
@ -451,8 +357,7 @@ typedef struct xfs_item_ops {
#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
#define IOP_UNPIN(ip) (*(ip)->li_ops->iop_unpin)(ip)
#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove)
#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip)
#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)
#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn)
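Folding iop_unpin_remove() into iop_unpin() leaves a single callback taking a 'remove' flag; judging by the hunks above, the flag is set only from xfs_trans_uncommit() (the IOP_UNPIN(lidp->lid_item, 1) call), where the item must also be torn out of the aborted transaction. An illustrative shape of an implementation; the item type is hypothetical, not the actual buf-item code:

STATIC void
xfs_foo_item_unpin(
	xfs_log_item_t	*lip,
	int		remove)
{
	/* drop this item's pin count first ... */

	/*
	 * remove != 0 means we are called from xfs_trans_uncommit():
	 * besides being unpinned, the item has to be unhooked from the
	 * transaction it was joined to.
	 */
	if (remove) {
		/* detach the item from its (aborted) transaction */
	}
}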
@ -516,8 +421,7 @@ typedef struct xfs_trans {
int64_t t_rblocks_delta;/* superblock rblocks change */
int64_t t_rextents_delta;/* superblocks rextents chg */
int64_t t_rextslog_delta;/* superblocks rextslog chg */
unsigned int t_items_free; /* log item descs free */
xfs_log_item_chunk_t t_items; /* first log item desc chunk */
struct list_head t_items; /* log item descriptors */
xfs_trans_header_t t_header; /* header for in-log trans */
struct list_head t_busy; /* list of busy extents */
unsigned long t_pflags; /* saved process flags state */
@ -569,8 +473,8 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
xfs_ino_t , uint, uint, struct xfs_inode **);
void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint);
void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *);
void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint);
@ -595,6 +499,7 @@ int xfs_trans_ail_init(struct xfs_mount *);
void xfs_trans_ail_destroy(struct xfs_mount *);
extern kmem_zone_t *xfs_trans_zone;
extern kmem_zone_t *xfs_log_item_desc_zone;
#endif /* __KERNEL__ */

View File

@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

View File

@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
@ -51,36 +47,17 @@ xfs_trans_buf_item_match(
xfs_daddr_t blkno,
int len)
{
xfs_log_item_chunk_t *licp;
xfs_log_item_desc_t *lidp;
xfs_buf_log_item_t *blip;
int i;
struct xfs_log_item_desc *lidp;
struct xfs_buf_log_item *blip;
len = BBTOB(len);
for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
if (xfs_lic_are_all_free(licp)) {
ASSERT(licp == &tp->t_items);
ASSERT(licp->lic_next == NULL);
return NULL;
}
for (i = 0; i < licp->lic_unused; i++) {
/*
* Skip unoccupied slots.
*/
if (xfs_lic_isfree(licp, i))
continue;
lidp = xfs_lic_slot(licp, i);
blip = (xfs_buf_log_item_t *)lidp->lid_item;
if (blip->bli_item.li_type != XFS_LI_BUF)
continue;
if (XFS_BUF_TARGET(blip->bli_buf) == target &&
XFS_BUF_ADDR(blip->bli_buf) == blkno &&
XFS_BUF_COUNT(blip->bli_buf) == len)
return blip->bli_buf;
}
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
blip = (struct xfs_buf_log_item *)lidp->lid_item;
if (blip->bli_item.li_type == XFS_LI_BUF &&
XFS_BUF_TARGET(blip->bli_buf) == target &&
XFS_BUF_ADDR(blip->bli_buf) == blkno &&
XFS_BUF_COUNT(blip->bli_buf) == len)
return blip->bli_buf;
}
return NULL;
@ -127,7 +104,7 @@ _xfs_trans_bjoin(
/*
* Get a log_item_desc to point at the new item.
*/
(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);
xfs_trans_add_item(tp, &bip->bli_item);
/*
* Initialize b_fsprivate2 so we can find it with incore_match()
@ -483,7 +460,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
{
xfs_buf_log_item_t *bip;
xfs_log_item_t *lip;
xfs_log_item_desc_t *lidp;
/*
* Default to a normal brelse() call if the tp is NULL.
@ -514,13 +490,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
/*
* Find the item descriptor pointing to this buffer's
* log item. It must be there.
*/
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
ASSERT(lidp != NULL);
trace_xfs_trans_brelse(bip);
/*
@ -536,7 +505,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
* If the buffer is dirty within this transaction, we can't
* release it until we commit.
*/
if (lidp->lid_flags & XFS_LID_DIRTY)
if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
return;
/*
@ -553,7 +522,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
/*
* Free up the log item descriptor tracking the released item.
*/
xfs_trans_free_item(tp, lidp);
xfs_trans_del_item(&bip->bli_item);
/*
* Clear the hold flag in the buf log item if it is set.
@ -665,7 +634,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
uint last)
{
xfs_buf_log_item_t *bip;
xfs_log_item_desc_t *lidp;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
@ -690,7 +658,7 @@ xfs_trans_log_buf(xfs_trans_t *tp,
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
bip->bli_item.li_cb = xfs_buf_iodone;
trace_xfs_trans_log_buf(bip);
@ -707,11 +675,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
}
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
ASSERT(lidp != NULL);
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
bip->bli_flags |= XFS_BLI_LOGGED;
xfs_buf_item_log(bip, first, last);
}
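The li_cb assignments in this file also lose their function-pointer casts, which suggests xfs_buf_iodone() now carries li_cb's exact signature. The difference matters: a cast assignment compiles even when the signatures drift apart, while the uncast one is type-checked by the compiler:

bip->bli_item.li_cb = (void(*)(xfs_buf_t *, xfs_log_item_t *))xfs_buf_iodone;	/* old: unchecked */
bip->bli_item.li_cb = xfs_buf_iodone;						/* new: checked */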
@ -740,7 +705,6 @@ xfs_trans_binval(
xfs_trans_t *tp,
xfs_buf_t *bp)
{
xfs_log_item_desc_t *lidp;
xfs_buf_log_item_t *bip;
ASSERT(XFS_BUF_ISBUSY(bp));
@ -748,8 +712,6 @@ xfs_trans_binval(
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
ASSERT(lidp != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_trans_binval(bip);
@ -764,7 +726,7 @@ xfs_trans_binval(
ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
return;
}
@ -797,7 +759,7 @@ xfs_trans_binval(
bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
memset((char *)(bip->bli_format.blf_data_map), 0,
(bip->bli_format.blf_map_size * sizeof(uint)));
lidp->lid_flags |= XFS_LID_DIRTY;
bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
tp->t_flags |= XFS_TRANS_DIRTY;
}
@ -853,12 +815,9 @@ xfs_trans_stale_inode_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_STALE_INODE;
bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
xfs_buf_iodone;
bip->bli_item.li_cb = xfs_buf_iodone;
}
/*
* Mark the buffer as being one which contains newly allocated
* inodes. We need to make sure that even if this buffer is

View File

@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
@ -49,9 +48,8 @@ xfs_trans_get_efi(xfs_trans_t *tp,
/*
* Get a log_item_desc to point at the new item.
*/
(void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip);
return (efip);
xfs_trans_add_item(tp, &efip->efi_item);
return efip;
}
/*
@ -65,15 +63,11 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
xfs_fsblock_t start_block,
xfs_extlen_t ext_len)
{
xfs_log_item_desc_t *lidp;
uint next_extent;
xfs_extent_t *extp;
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip);
ASSERT(lidp != NULL);
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
next_extent = efip->efi_next_extent;
ASSERT(next_extent < efip->efi_format.efi_nextents);
@ -106,9 +100,8 @@ xfs_trans_get_efd(xfs_trans_t *tp,
/*
* Get a log_item_desc to point at the new item.
*/
(void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp);
return (efdp);
xfs_trans_add_item(tp, &efdp->efd_item);
return efdp;
}
/*
@ -122,15 +115,11 @@ xfs_trans_log_efd_extent(xfs_trans_t *tp,
xfs_fsblock_t start_block,
xfs_extlen_t ext_len)
{
xfs_log_item_desc_t *lidp;
uint next_extent;
xfs_extent_t *extp;
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp);
ASSERT(lidp != NULL);
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
efdp->efd_item.li_desc->lid_flags |= XFS_LID_DIRTY;
next_extent = efdp->efd_next_extent;
ASSERT(next_extent < efdp->efd_format.efd_nextents);

View File

@ -24,20 +24,16 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#ifdef XFS_TRANS_DEBUG
STATIC void
@ -47,7 +43,6 @@ xfs_trans_inode_broot_debug(
#define xfs_trans_inode_broot_debug(ip)
#endif
/*
* Get an inode and join it to the transaction.
*/
@ -63,49 +58,39 @@ xfs_trans_iget(
int error;
error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
if (!error && tp)
xfs_trans_ijoin(tp, *ipp, lock_flags);
if (!error && tp) {
xfs_trans_ijoin(tp, *ipp);
(*ipp)->i_itemp->ili_lock_flags = lock_flags;
}
return error;
}
/*
* Add the locked inode to the transaction.
* The inode must be locked, and it cannot be associated with any
* transaction. The caller must specify the locks already held
* on the inode.
* Add a locked inode to the transaction.
*
* The inode must be locked, and it cannot be associated with any transaction.
*/
void
xfs_trans_ijoin(
xfs_trans_t *tp,
xfs_inode_t *ip,
uint lock_flags)
struct xfs_trans *tp,
struct xfs_inode *ip)
{
xfs_inode_log_item_t *iip;
ASSERT(ip->i_transp == NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(lock_flags & XFS_ILOCK_EXCL);
if (ip->i_itemp == NULL)
xfs_inode_item_init(ip, ip->i_mount);
iip = ip->i_itemp;
ASSERT(iip->ili_flags == 0);
ASSERT(iip->ili_lock_flags == 0);
/*
* Get a log_item_desc to point at the new item.
*/
(void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip));
xfs_trans_add_item(tp, &iip->ili_item);
xfs_trans_inode_broot_debug(ip);
/*
* If the IO lock is already held, mark that in the inode log item.
*/
if (lock_flags & XFS_IOLOCK_EXCL) {
iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
} else if (lock_flags & XFS_IOLOCK_SHARED) {
iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
}
/*
* Initialize i_transp so we can find it with xfs_inode_incore()
* in xfs_trans_iget() above.
@ -113,27 +98,25 @@ xfs_trans_ijoin(
ip->i_transp = tp;
}
/*
* Mark the inode as not needing to be unlocked when the inode item's
* IOP_UNLOCK() routine is called. The inode must already be locked
* and associated with the given transaction.
* Add a locked inode to the transaction.
*
* Grabs a reference to the inode which will be dropped when the transaction
* is committed. The inode will also be unlocked at that point. The inode
* must be locked, and it cannot be associated with any transaction.
*/
/*ARGSUSED*/
void
xfs_trans_ihold(
xfs_trans_t *tp,
xfs_inode_t *ip)
xfs_trans_ijoin_ref(
struct xfs_trans *tp,
struct xfs_inode *ip,
uint lock_flags)
{
ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
xfs_trans_ijoin(tp, ip);
IHOLD(ip);
ip->i_itemp->ili_lock_flags = lock_flags;
}
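Together the two functions replace the old ijoin + IHOLD + ihold dance with an explicit choice between two calling conventions. A sketch of the alternatives, matching the xfs_rename() hunk earlier:

xfs_ilock(ip, XFS_ILOCK_EXCL);

/* either: the caller keeps the lock and its inode reference */
xfs_trans_ijoin(tp, ip);

/* or: commit/cancel unlocks the inode and drops the reference */
xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);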
/*
* This is called to mark the fields indicated in fieldmask as needing
* to be logged when the transaction is committed. The inode must
@ -149,17 +132,12 @@ xfs_trans_log_inode(
xfs_inode_t *ip,
uint flags)
{
xfs_log_item_desc_t *lidp;
ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
ASSERT(lidp != NULL);
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY;
/*
* Always OR in the bits from the ili_last_fields field.

View File

@ -1,441 +0,0 @@
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
/* XXX: from here down needed until struct xfs_trans has its own ailp */
#include "xfs_bit.h"
#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *,
int, int, xfs_lsn_t);
/*
* This is called to add the given log item to the transaction's
* list of log items. It must find a free log item descriptor
* or allocate a new one and add the item to that descriptor.
* The function returns a pointer to the item descriptor used to point
* to the new item. The log item will now point to its new descriptor
* with its li_desc field.
*/
xfs_log_item_desc_t *
xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
{
xfs_log_item_desc_t *lidp;
xfs_log_item_chunk_t *licp;
int i=0;
/*
* If there are no free descriptors, allocate a new chunk
* of them and put it at the front of the chunk list.
*/
if (tp->t_items_free == 0) {
licp = (xfs_log_item_chunk_t*)
kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP);
ASSERT(licp != NULL);
/*
* Initialize the chunk, and then
* claim the first slot in the newly allocated chunk.
*/
xfs_lic_init(licp);
xfs_lic_claim(licp, 0);
licp->lic_unused = 1;
xfs_lic_init_slot(licp, 0);
lidp = xfs_lic_slot(licp, 0);
/*
* Link in the new chunk and update the free count.
*/
licp->lic_next = tp->t_items.lic_next;
tp->t_items.lic_next = licp;
tp->t_items_free = XFS_LIC_NUM_SLOTS - 1;
/*
* Initialize the descriptor and the generic portion
* of the log item.
*
* Point the new slot at this item and return it.
* Also point the log item at its currently active
* descriptor and set the item's mount pointer.
*/
lidp->lid_item = lip;
lidp->lid_flags = 0;
lidp->lid_size = 0;
lip->li_desc = lidp;
lip->li_mountp = tp->t_mountp;
lip->li_ailp = tp->t_mountp->m_ail;
return lidp;
}
/*
* Find the free descriptor. It is somewhere in the chunklist
* of descriptors.
*/
licp = &tp->t_items;
while (licp != NULL) {
if (xfs_lic_vacancy(licp)) {
if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
i = licp->lic_unused;
ASSERT(xfs_lic_isfree(licp, i));
break;
}
for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
if (xfs_lic_isfree(licp, i))
break;
}
ASSERT(i <= XFS_LIC_MAX_SLOT);
break;
}
licp = licp->lic_next;
}
ASSERT(licp != NULL);
/*
* If we find a free descriptor, claim it,
* initialize it, and return it.
*/
xfs_lic_claim(licp, i);
if (licp->lic_unused <= i) {
licp->lic_unused = i + 1;
xfs_lic_init_slot(licp, i);
}
lidp = xfs_lic_slot(licp, i);
tp->t_items_free--;
lidp->lid_item = lip;
lidp->lid_flags = 0;
lidp->lid_size = 0;
lip->li_desc = lidp;
lip->li_mountp = tp->t_mountp;
lip->li_ailp = tp->t_mountp->m_ail;
return lidp;
}
/*
* Free the given descriptor.
*
* This requires setting the bit in the chunk's free mask corresponding
* to the given slot.
*/
void
xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
{
uint slot;
xfs_log_item_chunk_t *licp;
xfs_log_item_chunk_t **licpp;
slot = xfs_lic_desc_to_slot(lidp);
licp = xfs_lic_desc_to_chunk(lidp);
xfs_lic_relse(licp, slot);
lidp->lid_item->li_desc = NULL;
tp->t_items_free++;
/*
* If there are no more used items in the chunk and this is not
* the chunk embedded in the transaction structure, then free
* the chunk. First pull it from the chunk list and then
* free it back to the heap. We didn't bother with a doubly
* linked list here because the lists should be very short
* and this is not a performance path. It's better to save
* the memory of the extra pointer.
*
* Also decrement the transaction structure's count of free items
* by the number in a chunk since we are freeing an empty chunk.
*/
if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) {
licpp = &(tp->t_items.lic_next);
while (*licpp != licp) {
ASSERT(*licpp != NULL);
licpp = &((*licpp)->lic_next);
}
*licpp = licp->lic_next;
kmem_free(licp);
tp->t_items_free -= XFS_LIC_NUM_SLOTS;
}
}
/*
* This is called to find the descriptor corresponding to the given
* log item. It returns a pointer to the descriptor.
* The log item MUST have a corresponding descriptor in the given
* transaction. This routine does not return NULL, it panics.
*
* The descriptor pointer is kept in the log item's li_desc field.
* Just return it.
*/
/*ARGSUSED*/
xfs_log_item_desc_t *
xfs_trans_find_item(xfs_trans_t *tp, xfs_log_item_t *lip)
{
ASSERT(lip->li_desc != NULL);
return lip->li_desc;
}
/*
* Return a pointer to the first descriptor in the chunk list.
* This does not return NULL if there are none, it panics.
*
* The first descriptor must be in either the first or second chunk.
* This is because the only chunk allowed to be empty is the first.
* All others are freed when they become empty.
*
* At some point this and xfs_trans_next_item() should be optimized
* to quickly look at the mask to determine if there is anything to
* look at.
*/
xfs_log_item_desc_t *
xfs_trans_first_item(xfs_trans_t *tp)
{
xfs_log_item_chunk_t *licp;
int i;
licp = &tp->t_items;
/*
* If it's not in the first chunk, skip to the second.
*/
if (xfs_lic_are_all_free(licp)) {
licp = licp->lic_next;
}
/*
* Return the first non-free descriptor in the chunk.
*/
ASSERT(!xfs_lic_are_all_free(licp));
for (i = 0; i < licp->lic_unused; i++) {
if (xfs_lic_isfree(licp, i)) {
continue;
}
return xfs_lic_slot(licp, i);
}
cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
return NULL;
}
/*
* Given a descriptor, return the next descriptor in the chunk list.
* This returns NULL if there are no more used descriptors in the list.
*
* We do this by first locating the chunk in which the descriptor resides,
* and then scanning forward in the chunk and the list for the next
* used descriptor.
*/
/*ARGSUSED*/
xfs_log_item_desc_t *
xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
{
xfs_log_item_chunk_t *licp;
int i;
licp = xfs_lic_desc_to_chunk(lidp);
/*
* First search the rest of the chunk. The for loop keeps us
* from referencing things beyond the end of the chunk.
*/
for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) {
if (xfs_lic_isfree(licp, i)) {
continue;
}
return xfs_lic_slot(licp, i);
}
/*
* Now search the next chunk. It must be there, because the
* next chunk would have been freed if it were empty.
* If there is no next chunk, return NULL.
*/
if (licp->lic_next == NULL) {
return NULL;
}
licp = licp->lic_next;
ASSERT(!xfs_lic_are_all_free(licp));
for (i = 0; i < licp->lic_unused; i++) {
if (xfs_lic_isfree(licp, i)) {
continue;
}
return xfs_lic_slot(licp, i);
}
ASSERT(0);
/* NOTREACHED */
return NULL; /* keep gcc quiet */
}
/*
* This is called to unlock all of the items of a transaction and to free
* all the descriptors of that transaction.
*
* It walks the list of descriptors and unlocks each item. It frees
* each chunk except that embedded in the transaction as it goes along.
*/
void
xfs_trans_free_items(
xfs_trans_t *tp,
xfs_lsn_t commit_lsn,
int flags)
{
xfs_log_item_chunk_t *licp;
xfs_log_item_chunk_t *next_licp;
int abort;
abort = flags & XFS_TRANS_ABORT;
licp = &tp->t_items;
/*
* Special case the embedded chunk so we don't free it below.
*/
if (!xfs_lic_are_all_free(licp)) {
(void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn);
xfs_lic_all_free(licp);
licp->lic_unused = 0;
}
licp = licp->lic_next;
/*
* Unlock each item in each chunk and free the chunks.
*/
while (licp != NULL) {
ASSERT(!xfs_lic_are_all_free(licp));
(void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn);
next_licp = licp->lic_next;
kmem_free(licp);
licp = next_licp;
}
/*
* Reset the transaction structure's free item count.
*/
tp->t_items_free = XFS_LIC_NUM_SLOTS;
tp->t_items.lic_next = NULL;
}
/*
* This is called to unlock the items associated with a transaction.
* Items which were not logged should be freed.
* Those which were logged must still be tracked so they can be unpinned
* when the transaction commits.
*/
void
xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
{
xfs_log_item_chunk_t *licp;
xfs_log_item_chunk_t *next_licp;
xfs_log_item_chunk_t **licpp;
int freed;
freed = 0;
licp = &tp->t_items;
/*
* Special case the embedded chunk so we don't free it.
*/
if (!xfs_lic_are_all_free(licp)) {
freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
}
licpp = &(tp->t_items.lic_next);
licp = licp->lic_next;
/*
* Unlock each item in each chunk, free non-dirty descriptors,
* and free empty chunks.
*/
while (licp != NULL) {
ASSERT(!xfs_lic_are_all_free(licp));
freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
next_licp = licp->lic_next;
if (xfs_lic_are_all_free(licp)) {
*licpp = next_licp;
kmem_free(licp);
freed -= XFS_LIC_NUM_SLOTS;
} else {
licpp = &(licp->lic_next);
}
ASSERT(*licpp == next_licp);
licp = next_licp;
}
/*
* Fix the free descriptor count in the transaction.
*/
tp->t_items_free += freed;
}
/*
* Unlock each item pointed to by a descriptor in the given chunk.
* Stamp the commit lsn into each item if necessary.
* Free descriptors pointing to items which are not dirty if freeing_chunk
* is zero. If freeing_chunk is non-zero, then we need to unlock all
* items in the chunk.
*
* Return the number of descriptors freed.
*/
STATIC int
xfs_trans_unlock_chunk(
xfs_log_item_chunk_t *licp,
int freeing_chunk,
int abort,
xfs_lsn_t commit_lsn)
{
xfs_log_item_desc_t *lidp;
xfs_log_item_t *lip;
int i;
int freed;
freed = 0;
lidp = licp->lic_descs;
for (i = 0; i < licp->lic_unused; i++, lidp++) {
if (xfs_lic_isfree(licp, i)) {
continue;
}
lip = lidp->lid_item;
lip->li_desc = NULL;
if (commit_lsn != NULLCOMMITLSN)
IOP_COMMITTING(lip, commit_lsn);
if (abort)
lip->li_flags |= XFS_LI_ABORTED;
IOP_UNLOCK(lip);
/*
* Free the descriptor if the item is not dirty
* within this transaction and the caller is not
* going to just free the entire thing regardless.
*/
if (!(freeing_chunk) &&
(!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
xfs_lic_relse(licp, i);
freed++;
}
}
return freed;
}

View File

@ -23,22 +23,8 @@ struct xfs_log_item_desc;
struct xfs_mount;
struct xfs_trans;
/*
* From xfs_trans_item.c
*/
struct xfs_log_item_desc *xfs_trans_add_item(struct xfs_trans *,
struct xfs_log_item *);
void xfs_trans_free_item(struct xfs_trans *,
struct xfs_log_item_desc *);
struct xfs_log_item_desc *xfs_trans_find_item(struct xfs_trans *,
struct xfs_log_item *);
struct xfs_log_item_desc *xfs_trans_first_item(struct xfs_trans *);
struct xfs_log_item_desc *xfs_trans_next_item(struct xfs_trans *,
struct xfs_log_item_desc *);
void xfs_trans_unlock_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn);
void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
int flags);
void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);
void xfs_trans_item_committed(struct xfs_log_item *lip,
xfs_lsn_t commit_lsn, int aborted);

View File

@ -25,18 +25,14 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_itable.h"
#include "xfs_utils.h"
@ -324,86 +320,3 @@ xfs_bumplink(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
return 0;
}
/*
* Try to truncate the given file to 0 length. Currently called
* only out of xfs_remove when it has to truncate a file to free
* up space for the remove to proceed.
*/
int
xfs_truncate_file(
xfs_mount_t *mp,
xfs_inode_t *ip)
{
xfs_trans_t *tp;
int error;
#ifdef QUOTADEBUG
/*
* This is called to truncate the quotainodes too.
*/
if (XFS_IS_UQUOTA_ON(mp)) {
if (ip->i_ino != mp->m_sb.sb_uquotino)
ASSERT(ip->i_udquot);
}
if (XFS_IS_OQUOTA_ON(mp)) {
if (ip->i_ino != mp->m_sb.sb_gquotino)
ASSERT(ip->i_gdquot);
}
#endif
/*
* Make the call to xfs_itruncate_start before starting the
* transaction, because we cannot make the call while we're
* in a transaction.
*/
xfs_ilock(ip, XFS_IOLOCK_EXCL);
error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
XFS_ITRUNCATE_LOG_COUNT))) {
xfs_trans_cancel(tp, 0);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
/*
* Follow the normal truncate locking protocol. Since we
* hold the inode in the transaction, we know that its number
* of references will stay constant.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ihold(tp, ip);
/*
* Signal a sync xaction. The only case where that isn't
* the case is if we're truncating an already unlinked file
* on a wsync fs. In that case, we know the blocks can't
* reappear in the file because the links to file are
* permanently toast. Currently, we're always going to
* want a sync transaction because this code is being
* called from places where nlink is guaranteed to be 1
* but I'm leaving the tests in to protect against future
* changes -- rcc.
*/
error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0,
XFS_DATA_FORK,
((ip->i_d.di_nlink != 0 ||
!(mp->m_flags & XFS_MOUNT_WSYNC))
? 1 : 0));
if (error) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
} else {
xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
return error;
}

View File

@ -18,7 +18,6 @@
#ifndef __XFS_UTILS_H__
#define __XFS_UTILS_H__
extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
xfs_dev_t, cred_t *, prid_t, int,
xfs_inode_t **, int *);

View File

@ -26,19 +26,14 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
@ -73,7 +68,7 @@ xfs_setattr(
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int need_iolock = 1;
xfs_itrace_entry(ip);
trace_xfs_setattr(ip);
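/*
 * Note: the xfs_itrace_entry() calls in this file are replaced by
 * dedicated tracepoints.  Their definitions are not part of this hunk;
 * they are presumably generated from the inode event class in
 * xfs_trace.h, roughly:
 *
 *	DEFINE_INODE_EVENT(xfs_setattr);
 */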
if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
@ -143,16 +138,6 @@ xfs_setattr(
goto error_return;
}
} else {
if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
!(flags & XFS_ATTR_DMI)) {
int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip,
iattr->ia_size, 0, dmflags, NULL);
if (code) {
lock_flags = 0;
goto error_return;
}
}
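/*
 * Blocks like the one deleted above are DMAPI (Data Management API)
 * event hooks used by hierarchical storage managers; the XFS_SEND_*()
 * macros and the DM_EVENT_ENABLED() checks go away with the dmapi
 * removal in this series.
 */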
if (need_iolock)
lock_flags |= XFS_IOLOCK_EXCL;
}
@ -283,8 +268,7 @@ xfs_setattr(
commit_flags = XFS_TRANS_RELEASE_LOG_RES;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, lock_flags);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
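/*
 * The join API is simplified throughout this series.  Sketch of the
 * recurring before/after pattern, taken from the call sites in this
 * diff:
 *
 *	old:	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *		xfs_trans_ihold(tp, ip);
 *	new:	xfs_trans_ijoin(tp, ip);
 */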
/*
* Only change the c/mtime if we are changing the size
@ -334,8 +318,7 @@ xfs_setattr(
xfs_iflags_set(ip, XFS_ITRUNCATED);
}
} else if (tp) {
xfs_trans_ijoin(tp, ip, lock_flags);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
}
/*
@ -470,17 +453,10 @@ xfs_setattr(
return XFS_ERROR(code);
}
if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
!(flags & XFS_ATTR_DMI)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL, NULL, NULL,
0, 0, AT_DELAY_FLAG(flags));
}
return 0;
abort_return:
commit_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
error_return:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@ -516,7 +492,7 @@ xfs_readlink_bmap(
int error = 0;
error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0,
mval, &nmaps, NULL, NULL);
mval, &nmaps, NULL);
if (error)
goto out;
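/*
 * The trailing NULL dropped from this and the following xfs_bmapi()/
 * xfs_bunmapi() calls is the extent-delta tracking argument removed by
 * this series; the assumed resulting prototype is roughly:
 *
 *	int xfs_bmapi(xfs_trans_t *tp, xfs_inode_t *ip, xfs_fileoff_t bno,
 *		      xfs_filblks_t len, int flags,
 *		      xfs_fsblock_t *firstblock, xfs_extlen_t total,
 *		      xfs_bmbt_irec_t *mval, int *nmap,
 *		      xfs_bmap_free_t *flist);
 */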
@ -557,7 +533,7 @@ xfs_readlink(
int pathlen;
int error = 0;
xfs_itrace_entry(ip);
trace_xfs_readlink(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@ -613,14 +589,14 @@ xfs_free_eofblocks(
*/
end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
map_len = last_fsb - end_fsb;
if (map_len <= 0)
if (last_fsb <= end_fsb)
return 0;
map_len = last_fsb - end_fsb;
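/*
 * Why the comparison now precedes the subtraction: the block numbers
 * are unsigned (xfs_fileoff_t), so the old "map_len = last_fsb -
 * end_fsb; if (map_len <= 0)" could never catch last_fsb < end_fsb --
 * the difference wraps to a huge positive value.  Illustration
 * (standalone, not part of the patch):
 *
 *	uint64_t end_fsb = 10, last_fsb = 5;
 *	uint64_t map_len = last_fsb - end_fsb;	wraps to 2^64 - 5
 *	map_len <= 0	 evaluates false despite the "negative" length
 *	last_fsb <= end_fsb  correctly evaluates true
 */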
nimaps = 1;
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
NULL, 0, &imap, &nimaps, NULL, NULL);
NULL, 0, &imap, &nimaps, NULL);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!error && (nimaps != 0) &&
@ -675,10 +651,7 @@ xfs_free_eofblocks(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip,
XFS_IOLOCK_EXCL |
XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
error = xfs_itruncate_finish(&tp, ip,
ip->i_size,
@ -750,8 +723,7 @@ xfs_inactive_symlink_rmt(
xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
size = (int)ip->i_d.di_size;
ip->i_d.di_size = 0;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
* Find the block(s) so we can invalidate and unmap them.
@ -761,7 +733,7 @@ xfs_inactive_symlink_rmt(
nmaps = ARRAY_SIZE(mval);
if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
&free_list, NULL)))
&free_list)))
goto error0;
/*
* Invalidate the block(s).
@ -776,7 +748,7 @@ xfs_inactive_symlink_rmt(
* Unmap the dead block(s) to the free_list.
*/
if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
&first_block, &free_list, NULL, &done)))
&first_block, &free_list, &done)))
goto error1;
ASSERT(done);
/*
@ -795,8 +767,7 @@ xfs_inactive_symlink_rmt(
* Mark it dirty so it will be logged and moved forward in the log as
* part of every commit.
*/
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
* Get a new, empty transaction to return to our caller.
@ -929,8 +900,7 @@ xfs_inactive_attrs(
goto error_cancel;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
ASSERT(ip->i_d.di_anextents == 0);
@ -1035,8 +1005,6 @@ xfs_inactive(
int error;
int truncate;
xfs_itrace_entry(ip);
/*
* If the inode is already free, then there can be nothing
* to clean up here.
@ -1060,9 +1028,6 @@ xfs_inactive(
mp = ip->i_mount;
if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY))
XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL);
error = 0;
/* If this is a read-only mount, don't do this (would generate I/O) */
@ -1120,8 +1085,7 @@ xfs_inactive(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
/*
* Normally, we have to run xfs_itruncate_finish synchronously.
@ -1154,8 +1118,7 @@ xfs_inactive(
return VN_INACTIVE_CACHE;
}
xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
} else {
error = xfs_trans_reserve(tp, 0,
XFS_IFREE_LOG_RES(mp),
@ -1168,8 +1131,7 @@ xfs_inactive(
}
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
}
/*
@ -1257,7 +1219,7 @@ xfs_lookup(
int error;
uint lock_mode;
xfs_itrace_entry(dp);
trace_xfs_lookup(dp, name);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
@ -1309,21 +1271,11 @@ xfs_create(
uint log_res;
uint log_count;
xfs_itrace_entry(dp);
trace_xfs_create(dp, name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
dp, DM_RIGHT_NULL, NULL,
DM_RIGHT_NULL, name->name, NULL,
mode, 0, 0);
if (error)
return error;
}
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
else
@ -1427,8 +1379,7 @@ xfs_create(
* the transaction cancel unlocking dp so don't do it explicitly in the
* error path.
*/
IHOLD(dp);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
unlock_dp_on_error = B_FALSE;
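/*
 * xfs_trans_ijoin_ref() replaces the old IHOLD() + xfs_trans_ijoin()
 * pairing seen in the deleted lines: judging from the call sites in
 * this diff, it joins the inode, takes an extra reference, and records
 * the lock flags so that commit/cancel unlock and release the inode.
 * Sketch:
 *
 *	old:	IHOLD(dp);
 *		xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 *	new:	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
 */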
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@ -1487,16 +1438,7 @@ xfs_create(
xfs_qm_dqrele(gdqp);
*ipp = ip;
/* Fallthrough to std_return with error = 0 */
std_return:
if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, dp, DM_RIGHT_NULL,
ip, DM_RIGHT_NULL, name->name, NULL, mode,
error, 0);
}
return error;
return 0;
out_bmap_cancel:
xfs_bmap_cancel(&free_list);
@ -1510,8 +1452,8 @@ xfs_create(
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
goto std_return;
std_return:
return error;
out_abort_rele:
/*
@ -1726,20 +1668,11 @@ xfs_remove(
uint resblks;
uint log_count;
xfs_itrace_entry(dp);
xfs_itrace_entry(ip);
trace_xfs_remove(dp, name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL, name->name, NULL,
ip->i_d.di_mode, 0, 0);
if (error)
return error;
}
error = xfs_qm_dqattach(dp, 0);
if (error)
goto std_return;
@ -1782,15 +1715,8 @@ xfs_remove(
xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
/*
* At this point, we've gotten both the directory and the entry
* inodes locked.
*/
IHOLD(ip);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
IHOLD(dp);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
/*
* If we're removing a directory perform some additional validation.
@ -1877,21 +1803,15 @@ xfs_remove(
if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
xfs_filestream_deassociate(ip);
std_return:
if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL, name->name, NULL,
ip->i_d.di_mode, error, 0);
}
return error;
return 0;
out_bmap_cancel:
xfs_bmap_cancel(&free_list);
cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
std_return:
return error;
}
int
@ -1909,25 +1829,13 @@ xfs_link(
int committed;
int resblks;
xfs_itrace_entry(tdp);
xfs_itrace_entry(sip);
trace_xfs_link(tdp, target_name);
ASSERT(!S_ISDIR(sip->i_d.di_mode));
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
tdp, DM_RIGHT_NULL,
sip, DM_RIGHT_NULL,
target_name->name, NULL, 0, 0, 0);
if (error)
return error;
}
/* Return through std_return after this point. */
error = xfs_qm_dqattach(sip, 0);
if (error)
goto std_return;
@ -1953,15 +1861,8 @@ xfs_link(
xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
/*
* Increment vnode ref counts since xfs_trans_commit &
* xfs_trans_cancel will both unlock the inodes and
* decrement the associated ref counts.
*/
IHOLD(sip);
IHOLD(tdp);
xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, tdp, XFS_ILOCK_EXCL);
/*
* If the source has too many links, we can't make any more to it.
@ -2014,27 +1915,14 @@ xfs_link(
goto abort_return;
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
goto std_return;
/* Fall through to std_return with error = 0. */
std_return:
if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
tdp, DM_RIGHT_NULL,
sip, DM_RIGHT_NULL,
target_name->name, NULL, 0, error, 0);
}
return error;
return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
error_return:
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
std_return:
return error;
}
int
@ -2074,7 +1962,7 @@ xfs_symlink(
ip = NULL;
tp = NULL;
xfs_itrace_entry(dp);
trace_xfs_symlink(dp, link_name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@ -2086,17 +1974,6 @@ xfs_symlink(
if (pathlen >= MAXPATHLEN) /* total string too long */
return XFS_ERROR(ENAMETOOLONG);
if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
link_name->name,
(unsigned char *)target_path, 0, 0, 0);
if (error)
return error;
}
/* Return through std_return after this point. */
udqp = gdqp = NULL;
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
@ -2180,8 +2057,7 @@ xfs_symlink(
* transaction cancel unlocking dp so don't do it explicitly in the
* error path.
*/
IHOLD(dp);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
unlock_dp_on_error = B_FALSE;
/*
@ -2215,7 +2091,7 @@ xfs_symlink(
error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
&first_block, resblks, mval, &nmaps,
&free_list, NULL);
&free_list);
if (error) {
goto error1;
}
@ -2278,21 +2154,8 @@ xfs_symlink(
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
/* Fall through to std_return with error = 0 or errno from
* xfs_trans_commit */
std_return:
if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
dp, DM_RIGHT_NULL,
error ? NULL : ip,
DM_RIGHT_NULL, link_name->name,
(unsigned char *)target_path,
0, error, 0);
}
if (!error)
*ipp = ip;
return error;
*ipp = ip;
return 0;
error2:
IRELE(ip);
@ -2306,8 +2169,8 @@ std_return:
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
goto std_return;
std_return:
return error;
}
int
@ -2333,13 +2196,12 @@ xfs_set_dmattrs(
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
ip->i_d.di_dmevmask = evmask;
ip->i_d.di_dmstate = state;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
IHOLD(ip);
error = xfs_trans_commit(tp, 0);
return error;
@ -2390,7 +2252,7 @@ xfs_alloc_file_space(
int committed;
int error;
xfs_itrace_entry(ip);
trace_xfs_alloc_file_space(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@ -2412,25 +2274,9 @@ xfs_alloc_file_space(
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
allocatesize_fsb = XFS_B_TO_FSB(mp, count);
/* Generate a DMAPI event if needed. */
if (alloc_type != 0 && offset < ip->i_size &&
(attr_flags & XFS_ATTR_DMI) == 0 &&
DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
xfs_off_t end_dmi_offset;
end_dmi_offset = offset+len;
if (end_dmi_offset > ip->i_size)
end_dmi_offset = ip->i_size;
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset,
end_dmi_offset - offset, 0, NULL);
if (error)
return error;
}
/*
* Allocate file space until done or until there is an error
*/
retry:
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
@ -2488,8 +2334,7 @@ retry:
if (error)
goto error1;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
/*
* Issue the xfs_bmapi() call to allocate the blocks
@ -2498,7 +2343,7 @@ retry:
error = xfs_bmapi(tp, ip, startoffset_fsb,
allocatesize_fsb, bmapi_flag,
&firstfsb, 0, imapp, &nimaps,
&free_list, NULL);
&free_list);
if (error) {
goto error0;
}
@ -2527,17 +2372,6 @@ retry:
startoffset_fsb += allocated_fsb;
allocatesize_fsb -= allocated_fsb;
}
dmapi_enospc_check:
if (error == ENOSPC && (attr_flags & XFS_ATTR_DMI) == 0 &&
DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
ip, DM_RIGHT_NULL,
ip, DM_RIGHT_NULL,
NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
if (error == 0)
goto retry; /* Maybe DMAPI app. has made space */
/* else fall through with error from XFS_SEND_DATA */
}
return error;
@ -2548,7 +2382,7 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
goto dmapi_enospc_check;
return error;
}
/*
@ -2598,7 +2432,7 @@ xfs_zero_remaining_bytes(
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimap = 1;
error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0,
NULL, 0, &imap, &nimap, NULL, NULL);
NULL, 0, &imap, &nimap, NULL);
if (error || nimap < 1)
break;
ASSERT(imap.br_blockcount >= 1);
@ -2661,7 +2495,6 @@ xfs_free_file_space(
{
int committed;
int done;
xfs_off_t end_dmi_offset;
xfs_fileoff_t endoffset_fsb;
int error;
xfs_fsblock_t firstfsb;
@ -2680,7 +2513,7 @@ xfs_free_file_space(
mp = ip->i_mount;
xfs_itrace_entry(ip);
trace_xfs_free_file_space(ip);
error = xfs_qm_dqattach(ip, 0);
if (error)
@ -2691,19 +2524,7 @@ xfs_free_file_space(
return error;
rt = XFS_IS_REALTIME_INODE(ip);
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
end_dmi_offset = offset + len;
endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
if (offset < ip->i_size && (attr_flags & XFS_ATTR_DMI) == 0 &&
DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
if (end_dmi_offset > ip->i_size)
end_dmi_offset = ip->i_size;
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip,
offset, end_dmi_offset - offset,
AT_DELAY_FLAG(attr_flags), NULL);
if (error)
return error;
}
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
if (attr_flags & XFS_ATTR_NOLOCK)
need_iolock = 0;
@ -2731,7 +2552,7 @@ xfs_free_file_space(
if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
nimap = 1;
error = xfs_bmapi(NULL, ip, startoffset_fsb,
1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
1, 0, NULL, 0, &imap, &nimap, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
@ -2746,7 +2567,7 @@ xfs_free_file_space(
}
nimap = 1;
error = xfs_bmapi(NULL, ip, endoffset_fsb - 1,
1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
1, 0, NULL, 0, &imap, &nimap, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
@ -2814,8 +2635,7 @@ xfs_free_file_space(
if (error)
goto error1;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
/*
* Issue the bunmapi() call to free the blocks
@ -2823,7 +2643,7 @@ xfs_free_file_space(
xfs_bmap_init(&free_list, &firstfsb);
error = xfs_bunmapi(tp, ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb,
0, 2, &firstfsb, &free_list, NULL, &done);
0, 2, &firstfsb, &free_list, &done);
if (error) {
goto error0;
}
@ -2883,8 +2703,6 @@ xfs_change_file_space(
xfs_trans_t *tp;
struct iattr iattr;
xfs_itrace_entry(ip);
if (!S_ISREG(ip->i_d.di_mode))
return XFS_ERROR(EINVAL);
@ -2985,8 +2803,7 @@ xfs_change_file_space(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_ijoin(tp, ip);
if ((attr_flags & XFS_ATTR_DMI) == 0) {
ip->i_d.di_mode &= ~S_ISUID;


@ -415,7 +415,8 @@ struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
ssize_t bytes, void *private, int ret,
bool is_async);
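With the extended signature the completion handler now receives the final return code and an is_async flag, and on the async path it is responsible for completing the iocb itself; the generic direct-io code falls back to aio_complete() only when the filesystem callback is not invoked. A minimal sketch of a conforming handler, with a hypothetical name and the filesystem-specific work elided:

	static void example_end_io(struct kiocb *iocb, loff_t offset,
				   ssize_t bytes, void *private, int ret,
				   bool is_async)
	{
		/*
		 * Filesystem-specific completion work -- e.g. unwritten
		 * extent conversion -- would run (or be queued) here.
		 */
		if (is_async)
			aio_complete(iocb, ret, 0);
	}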
/*
* Attribute flags. These should be or-ed together to figure out what