remove SWRITE* I/O types
These flags aren't real I/O types, but tell ll_rw_block to always lock the
buffer instead of giving up on a failed trylock.

Instead add a new write_dirty_buffer helper that implements this semantic
and use it from the existing SWRITE* callers.  Note that the ll_rw_block
code had a bug where it didn't promote WRITE_SYNC_PLUG properly, which this
patch fixes.

In the ufs code clean up the helper that used to call ll_rw_block to mirror
sync_dirty_buffer, which is the function it implements for compound buffers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
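As an illustrative sketch (not part of the patch itself), the conversion the diff applies at a typical compound-buffer call site looks like the following; nr_bhs and bhs stand in for whatever buffer_head array the caller already has:

	/* before: SWRITE told ll_rw_block() to lock every buffer
	 * unconditionally and submit the ones that were dirty */
	ll_rw_block(SWRITE, nr_bhs, bhs);
	for (i = 0; i < nr_bhs; i++)
		wait_on_buffer(bhs[i]);

	/* after: write_dirty_buffer() locks one buffer, clears its dirty
	 * bit and submits it (or just unlocks it if it was clean); the
	 * wait loop that provides the integrity guarantee is unchanged */
	for (i = 0; i < nr_bhs; i++)
		write_dirty_buffer(bhs[i], WRITE);
	for (i = 0; i < nr_bhs; i++)
		wait_on_buffer(bhs[i]);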
commit 9cb569d601
parent 87e99511ea

fs/buffer.c
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                         spin_unlock(lock);
                         /*
                          * Ensure any pending I/O completes so that
-                         * ll_rw_block() actually writes the current
-                         * contents - it is a noop if I/O is still in
-                         * flight on potentially older contents.
+                         * write_dirty_buffer() actually writes the
+                         * current contents - it is a noop if I/O is
+                         * still in flight on potentially older
+                         * contents.
                          */
-                        ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+                        write_dirty_buffer(bh, WRITE_SYNC_PLUG);

                         /*
                          * Kick off IO for the previous mapping. Note
@@ -2949,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);

 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request.  Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request.  Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2980,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
         for (i = 0; i < nr; i++) {
                 struct buffer_head *bh = bhs[i];

-                if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-                        lock_buffer(bh);
-                else if (!trylock_buffer(bh))
+                if (!trylock_buffer(bh))
                         continue;
-
-                if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-                    rw == SWRITE_SYNC_PLUG) {
+                if (rw == WRITE) {
                         if (test_clear_buffer_dirty(bh)) {
                                 bh->b_end_io = end_buffer_write_sync;
                                 get_bh(bh);
-                                if (rw == SWRITE_SYNC)
-                                        submit_bh(WRITE_SYNC, bh);
-                                else
-                                        submit_bh(WRITE, bh);
+                                submit_bh(WRITE, bh);
                                 continue;
                         }
                 } else {
@@ -3009,6 +3002,19 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);

+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+        lock_buffer(bh);
+        if (!test_clear_buffer_dirty(bh)) {
+                unlock_buffer(bh);
+                return;
+        }
+        bh->b_end_io = end_buffer_write_sync;
+        get_bh(bh);
+        submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it.  The caller must have a ref on

fs/fat/misc.c
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
 {
         int i, err = 0;

-        ll_rw_block(SWRITE, nr_bhs, bhs);
+        for (i = 0; i < nr_bhs; i++)
+                write_dirty_buffer(bhs[i], WRITE);
+
         for (i = 0; i < nr_bhs; i++) {
                 wait_on_buffer(bhs[i]);
                 if (buffer_eopnotsupp(bhs[i])) {

fs/jbd/checkpoint.c
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
 {
         int i;

-        ll_rw_block(SWRITE, *batch_count, bhs);
+        for (i = 0; i < *batch_count; i++)
+                write_dirty_buffer(bhs[i], WRITE);
+
         for (i = 0; i < *batch_count; i++) {
                 struct buffer_head *bh = bhs[i];
                 clear_buffer_jwrite(bh);

fs/jbd/journal.c
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait)
         if (wait)
                 sync_dirty_buffer(bh);
         else
-                ll_rw_block(SWRITE, 1, &bh);
+                write_dirty_buffer(bh, WRITE);

 out:
         /* If we have just flushed the log (by marking s_start==0), then

fs/jbd/revoke.c
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal,
         set_buffer_jwrite(bh);
         BUFFER_TRACE(bh, "write");
         set_buffer_dirty(bh);
-        ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+        write_dirty_buffer(bh, write_op);
 }
 #endif


fs/jbd2/checkpoint.c
@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count)
 {
         int i;

-        ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
+        for (i = 0; i < *batch_count; i++)
+                write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+
         for (i = 0; i < *batch_count; i++) {
                 struct buffer_head *bh = journal->j_chkpt_bhs[i];
                 clear_buffer_jwrite(bh);

fs/jbd2/journal.c
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
                         set_buffer_uptodate(bh);
                 }
         } else
-                ll_rw_block(SWRITE, 1, &bh);
+                write_dirty_buffer(bh, WRITE);

 out:
         /* If we have just flushed the log (by marking s_start==0), then

fs/jbd2/revoke.c
@@ -625,7 +625,7 @@ static void flush_descriptor(journal_t *journal,
         set_buffer_jwrite(bh);
         BUFFER_TRACE(bh, "write");
         set_buffer_dirty(bh);
-        ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+        write_dirty_buffer(bh, write_op);
 }
 #endif


fs/reiserfs/journal.c
@@ -2311,7 +2311,7 @@ static int journal_read_transaction(struct super_block *sb,
         /* flush out the real blocks */
         for (i = 0; i < get_desc_trans_len(desc); i++) {
                 set_buffer_dirty(real_blocks[i]);
-                ll_rw_block(SWRITE, 1, real_blocks + i);
+                write_dirty_buffer(real_blocks[i], WRITE);
         }
         for (i = 0; i < get_desc_trans_len(desc); i++) {
                 wait_on_buffer(real_blocks[i]);

fs/ufs/balloc.c
@@ -114,10 +114,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)

         ubh_mark_buffer_dirty (USPI_UBH(uspi));
         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer (UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));
         sb->s_dirt = 1;

         unlock_super (sb);
@@ -207,10 +205,8 @@ do_more:

         ubh_mark_buffer_dirty (USPI_UBH(uspi));
         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer (UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));

         if (overflow) {
                 fragment += count;
@@ -558,10 +554,8 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,

         ubh_mark_buffer_dirty (USPI_UBH(uspi));
         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer (UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));
         sb->s_dirt = 1;

         UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -680,10 +674,8 @@ cg_found:
 succed:
         ubh_mark_buffer_dirty (USPI_UBH(uspi));
         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer (UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));
         sb->s_dirt = 1;

         result += cgno * uspi->s_fpg;

fs/ufs/ialloc.c
@@ -113,10 +113,8 @@ void ufs_free_inode (struct inode * inode)

         ubh_mark_buffer_dirty (USPI_UBH(uspi));
         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer (UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));

         sb->s_dirt = 1;
         unlock_super (sb);
@@ -156,10 +154,8 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,

         fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
         ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer(UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));

         UFSD("EXIT\n");
 }
@@ -290,10 +286,8 @@ cg_found:
         }
         ubh_mark_buffer_dirty (USPI_UBH(uspi));
         ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-        if (sb->s_flags & MS_SYNCHRONOUS) {
-                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-                ubh_wait_on_buffer (UCPI_UBH(ucpi));
-        }
+        if (sb->s_flags & MS_SYNCHRONOUS)
+                ubh_sync_block(UCPI_UBH(ucpi));
         sb->s_dirt = 1;

         inode->i_ino = cg * uspi->s_ipg + bit;

fs/ufs/truncate.c
@@ -243,10 +243,8 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
                 ubh_bforget(ind_ubh);
                 ind_ubh = NULL;
         }
-        if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
-                ubh_ll_rw_block(SWRITE, ind_ubh);
-                ubh_wait_on_buffer (ind_ubh);
-        }
+        if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
+                ubh_sync_block(ind_ubh);
         ubh_brelse (ind_ubh);

         UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -307,10 +305,8 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
                 ubh_bforget(dind_bh);
                 dind_bh = NULL;
         }
-        if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
-                ubh_ll_rw_block(SWRITE, dind_bh);
-                ubh_wait_on_buffer (dind_bh);
-        }
+        if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
+                ubh_sync_block(dind_bh);
         ubh_brelse (dind_bh);

         UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -367,10 +363,8 @@ static int ufs_trunc_tindirect(struct inode *inode)
                 ubh_bforget(tind_bh);
                 tind_bh = NULL;
         }
-        if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
-                ubh_ll_rw_block(SWRITE, tind_bh);
-                ubh_wait_on_buffer (tind_bh);
-        }
+        if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
+                ubh_sync_block(tind_bh);
         ubh_brelse (tind_bh);

         UFSD("EXIT: ino %lu\n", inode->i_ino);

fs/ufs/util.c
@@ -113,21 +113,17 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
         }
 }

-void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
+void ubh_sync_block(struct ufs_buffer_head *ubh)
 {
-        if (!ubh)
-                return;
+        if (ubh) {
+                unsigned i;

-        ll_rw_block(rw, ubh->count, ubh->bh);
-}
+                for (i = 0; i < ubh->count; i++)
+                        write_dirty_buffer(ubh->bh[i], WRITE);

-void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
-{
-        unsigned i;
-        if (!ubh)
-                return;
-        for ( i = 0; i < ubh->count; i++ )
-                wait_on_buffer (ubh->bh[i]);
+                for (i = 0; i < ubh->count; i++)
+                        wait_on_buffer(ubh->bh[i]);
+        }
 }

 void ubh_bforget (struct ufs_buffer_head * ubh)

fs/ufs/util.h
@@ -269,8 +269,7 @@ extern void ubh_brelse (struct ufs_buffer_head *);
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
 extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
-extern void ubh_ll_rw_block(int, struct ufs_buffer_head *);
-extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern void ubh_sync_block(struct ufs_buffer_head *);
 extern void ubh_bforget (struct ufs_buffer_head *);
 extern int ubh_buffer_dirty (struct ufs_buffer_head *);
 #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)

include/linux/buffer_head.h
@@ -182,6 +182,7 @@ void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
                         sector_t bblock, unsigned blocksize);

include/linux/fs.h
@@ -125,9 +125,6 @@ struct inodes_stat_t {
  *                      block layer could (in theory) choose to ignore this
  *                      request if it runs into resource problems.
  * WRITE                A normal async write. Device will be plugged.
- * SWRITE               Like WRITE, but a special case for ll_rw_block() that
- *                      tells it to lock the buffer first. Normally a buffer
- *                      must be locked before doing IO.
  * WRITE_SYNC_PLUG      Synchronous write. Identical to WRITE, but passes down
  *                      the hint that someone will be waiting on this IO
  *                      shortly. The device must still be unplugged explicitly,
@@ -138,9 +135,6 @@ struct inodes_stat_t {
  *                      immediately after submission. The write equivalent
  *                      of READ_SYNC.
  * WRITE_ODIRECT_PLUG   Special case write for O_DIRECT only.
- * SWRITE_SYNC
- * SWRITE_SYNC_PLUG     Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
- *                      See SWRITE.
  * WRITE_BARRIER        Like WRITE_SYNC, but tells the block layer that all
  *                      previously submitted writes must be safely on storage
  *                      before this one is started. Also guarantees that when
@@ -155,7 +149,6 @@ struct inodes_stat_t {
 #define READ                    0
 #define WRITE                   RW_MASK
 #define READA                   RWA_MASK
-#define SWRITE                  (WRITE | READA)

 #define READ_SYNC               (READ | REQ_SYNC | REQ_UNPLUG)
 #define READ_META               (READ | REQ_META)
@@ -165,8 +158,6 @@ struct inodes_stat_t {
 #define WRITE_META              (WRITE | REQ_META)
 #define WRITE_BARRIER           (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
                                  REQ_HARDBARRIER)
-#define SWRITE_SYNC_PLUG        (SWRITE | REQ_SYNC | REQ_NOIDLE)
-#define SWRITE_SYNC             (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)

 /*
  * These aren't really reads or writes, they pass down information about