[PATCH] jbd2: rename jbd2 symbols to avoid duplication of jbd symbols

Mingming Cao originally did this work, and Shaggy reproduced it using some
scripts from her.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author: Mingming Cao, 2006-10-11 01:20:59 -07:00; committed by Linus Torvalds
parent 470decc613
commit f7f4bccb72
9 changed files with 671 additions and 671 deletions
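For callers, the rename is mechanical: every exported jbd symbol gains a jbd2_ prefix and the JFS_* constants become JBD2_*. The sketch below is purely illustrative and is not part of this patch; the function names and signatures are the ones introduced by this commit (jbd2_journal_start, jbd2_journal_get_write_access, jbd2_journal_dirty_metadata, jbd2_journal_stop), but the caller itself is hypothetical.

/*
 * Hypothetical jbd2 client, not taken from this commit: update a single
 * metadata buffer through the renamed API. Before this patch the same
 * calls were journal_start(), journal_get_write_access(),
 * journal_dirty_metadata() and journal_stop().
 */
#include <linux/jbd2.h>
#include <linux/buffer_head.h>
#include <linux/err.h>

static int example_update_one_buffer(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err, ret;

	/* Reserve credit for one modified block. */
	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Declare intent to modify bh as metadata. */
	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle here ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	/* Complete the handle whether or not the update succeeded. */
	ret = jbd2_journal_stop(handle);
	return err ? err : ret;
}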


@ -2,6 +2,6 @@
# Makefile for the linux journaling routines.
#
obj-$(CONFIG_JBD) += jbd.o
obj-$(CONFIG_JBD2) += jbd2.o
jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
jbd2-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o


@ -19,7 +19,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
@ -95,9 +95,9 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
ret = __journal_remove_checkpoint(jh) + 1;
ret = __jbd2_journal_remove_checkpoint(jh) + 1;
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
__brelse(bh);
} else {
@ -107,19 +107,19 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
}
/*
* __log_wait_for_space: wait until there is space in the journal.
* __jbd2_log_wait_for_space: wait until there is space in the journal.
*
* Called under j-state_lock *only*. It will be unlocked if we have to wait
* for a checkpoint to free up some space in the log.
*/
void __log_wait_for_space(journal_t *journal)
void __jbd2_log_wait_for_space(journal_t *journal)
{
int nblocks;
assert_spin_locked(&journal->j_state_lock);
nblocks = jbd_space_needed(journal);
while (__log_space_left(journal) < nblocks) {
if (journal->j_flags & JFS_ABORT)
while (__jbd2_log_space_left(journal) < nblocks) {
if (journal->j_flags & JBD2_ABORT)
return;
spin_unlock(&journal->j_state_lock);
mutex_lock(&journal->j_checkpoint_mutex);
@ -130,9 +130,9 @@ void __log_wait_for_space(journal_t *journal)
*/
spin_lock(&journal->j_state_lock);
nblocks = jbd_space_needed(journal);
if (__log_space_left(journal) < nblocks) {
if (__jbd2_log_space_left(journal) < nblocks) {
spin_unlock(&journal->j_state_lock);
log_do_checkpoint(journal);
jbd2_log_do_checkpoint(journal);
spin_lock(&journal->j_state_lock);
}
mutex_unlock(&journal->j_checkpoint_mutex);
@ -198,9 +198,9 @@ restart:
* Now in whatever state the buffer currently is, we know that
* it has been written out and so we can drop it from the list
*/
released = __journal_remove_checkpoint(jh);
released = __jbd2_journal_remove_checkpoint(jh);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
}
@ -252,16 +252,16 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
log_start_commit(journal, tid);
log_wait_commit(journal, tid);
jbd2_log_start_commit(journal, tid);
jbd2_log_wait_commit(journal, tid);
ret = 1;
} else if (!buffer_dirty(bh)) {
J_ASSERT_JH(jh, !buffer_jbddirty(bh));
BUFFER_TRACE(bh, "remove from checkpoint");
__journal_remove_checkpoint(jh);
__jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
__brelse(bh);
ret = 1;
} else {
@ -296,7 +296,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
*
* The journal should be locked before calling this function.
*/
int log_do_checkpoint(journal_t *journal)
int jbd2_log_do_checkpoint(journal_t *journal)
{
transaction_t *transaction;
tid_t this_tid;
@ -309,7 +309,7 @@ int log_do_checkpoint(journal_t *journal)
* don't need checkpointing, just eliminate them from the
* journal straight away.
*/
result = cleanup_journal_tail(journal);
result = jbd2_cleanup_journal_tail(journal);
jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
if (result <= 0)
return result;
@ -374,7 +374,7 @@ restart:
}
out:
spin_unlock(&journal->j_list_lock);
result = cleanup_journal_tail(journal);
result = jbd2_cleanup_journal_tail(journal);
if (result < 0)
return result;
return 0;
@ -397,7 +397,7 @@ out:
* we have an abort error outstanding.
*/
int cleanup_journal_tail(journal_t *journal)
int jbd2_cleanup_journal_tail(journal_t *journal)
{
transaction_t * transaction;
tid_t first_tid;
@ -452,8 +452,8 @@ int cleanup_journal_tail(journal_t *journal)
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
spin_unlock(&journal->j_state_lock);
if (!(journal->j_flags & JFS_ABORT))
journal_update_superblock(journal, 1);
if (!(journal->j_flags & JBD2_ABORT))
jbd2_journal_update_superblock(journal, 1);
return 0;
}
@ -518,7 +518,7 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
* Returns number of buffers reaped (for debug)
*/
int __journal_clean_checkpoint_list(journal_t *journal)
int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
{
transaction_t *transaction, *last_transaction, *next_transaction;
int ret = 0;
@ -578,7 +578,7 @@ out:
* This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
int __journal_remove_checkpoint(struct journal_head *jh)
int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
transaction_t *transaction;
journal_t *journal;
@ -607,7 +607,7 @@ int __journal_remove_checkpoint(struct journal_head *jh)
* dropped!
*
* The locking here around j_committing_transaction is a bit sleazy.
* See the comment at the end of journal_commit_transaction().
* See the comment at the end of jbd2_journal_commit_transaction().
*/
if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "belongs to committing transaction");
@ -617,7 +617,7 @@ int __journal_remove_checkpoint(struct journal_head *jh)
/* OK, that was the last buffer for the transaction: we can now
safely remove this transaction from the log */
__journal_drop_transaction(journal, transaction);
__jbd2_journal_drop_transaction(journal, transaction);
/* Just in case anybody was waiting for more transactions to be
checkpointed... */
@ -636,7 +636,7 @@ out:
* Called with the journal locked.
* Called with j_list_lock held.
*/
void __journal_insert_checkpoint(struct journal_head *jh,
void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
transaction_t *transaction)
{
JBUFFER_TRACE(jh, "entry");
@ -666,7 +666,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,
* Called with j_list_lock held.
*/
void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
assert_spin_locked(&journal->j_list_lock);
if (transaction->t_cpnext) {


@ -1,5 +1,5 @@
/*
* linux/fs/jbd/commit.c
* linux/fs/jbd2/commit.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
@ -15,7 +15,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
@ -111,7 +111,7 @@ static int journal_write_commit_record(journal_t *journal,
if (is_journal_aborted(journal))
return 0;
descriptor = journal_get_descriptor_buffer(journal);
descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor)
return 1;
@ -120,14 +120,14 @@ static int journal_write_commit_record(journal_t *journal,
/* AKPM: buglet - add `i' to tmp! */
for (i = 0; i < bh->b_size; i += 512) {
journal_header_t *tmp = (journal_header_t*)bh->b_data;
tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
}
JBUFFER_TRACE(descriptor, "write commit block");
set_buffer_dirty(bh);
if (journal->j_flags & JFS_BARRIER) {
if (journal->j_flags & JBD2_BARRIER) {
set_buffer_ordered(bh);
barrier_done = 1;
}
@ -145,7 +145,7 @@ static int journal_write_commit_record(journal_t *journal,
"disabling barriers\n",
bdevname(journal->j_dev, b));
spin_lock(&journal->j_state_lock);
journal->j_flags &= ~JFS_BARRIER;
journal->j_flags &= ~JBD2_BARRIER;
spin_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
@ -155,7 +155,7 @@ static int journal_write_commit_record(journal_t *journal,
ret = sync_dirty_buffer(bh);
}
put_bh(bh); /* One for getblk() */
journal_put_journal_head(descriptor);
jbd2_journal_put_journal_head(descriptor);
return (ret == -EIO);
}
@ -239,7 +239,7 @@ write_out_data:
if (locked && test_clear_buffer_dirty(bh)) {
BUFFER_TRACE(bh, "needs writeout, adding to array");
wbuf[bufs++] = bh;
__journal_file_buffer(jh, commit_transaction,
__jbd2_journal_file_buffer(jh, commit_transaction,
BJ_Locked);
jbd_unlock_bh_state(bh);
if (bufs == journal->j_wbufsize) {
@ -251,13 +251,13 @@ write_out_data:
}
else {
BUFFER_TRACE(bh, "writeout complete: unfile");
__journal_unfile_buffer(jh);
__jbd2_journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
if (locked)
unlock_buffer(bh);
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
/* Once for our safety reference, once for
* journal_remove_journal_head() */
* jbd2_journal_remove_journal_head() */
put_bh(bh);
put_bh(bh);
}
@ -272,12 +272,12 @@ write_out_data:
}
/*
* journal_commit_transaction
* jbd2_journal_commit_transaction
*
* The primary function for committing a transaction to the log. This
* function is called by the journal thread to begin a complete commit.
*/
void journal_commit_transaction(journal_t *journal)
void jbd2_journal_commit_transaction(journal_t *journal)
{
transaction_t *commit_transaction;
struct journal_head *jh, *new_jh, *descriptor;
@ -305,10 +305,10 @@ void journal_commit_transaction(journal_t *journal)
spin_unlock(&journal->j_list_lock);
#endif
/* Do we need to erase the effects of a prior journal_flush? */
if (journal->j_flags & JFS_FLUSHED) {
/* Do we need to erase the effects of a prior jbd2_journal_flush? */
if (journal->j_flags & JBD2_FLUSHED) {
jbd_debug(3, "super block updated\n");
journal_update_superblock(journal, 1);
jbd2_journal_update_superblock(journal, 1);
} else {
jbd_debug(3, "superblock not updated\n");
}
@ -350,7 +350,7 @@ void journal_commit_transaction(journal_t *journal)
* BJ_Reserved buffers. Note, it is _not_ permissible to assume
* that there are no such buffers: if a large filesystem
* operation like a truncate needs to split itself over multiple
* transactions, then it may try to do a journal_restart() while
* transactions, then it may try to do a jbd2_journal_restart() while
* there are still BJ_Reserved buffers outstanding. These must
* be released cleanly from the current transaction.
*
@ -358,25 +358,25 @@ void journal_commit_transaction(journal_t *journal)
* again before modifying the buffer in the new transaction, but
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple journal_get_write_access() calls to the same
* that multiple jbd2_journal_get_write_access() calls to the same
* buffer are perfectly permissable.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
JBUFFER_TRACE(jh, "reserved, unused: refile");
/*
* A journal_get_undo_access()+journal_release_buffer() may
* A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
* leave undo-committed data.
*/
if (jh->b_committed_data) {
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
jbd_slab_free(jh->b_committed_data, bh->b_size);
jbd2_slab_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
journal_refile_buffer(journal, jh);
jbd2_journal_refile_buffer(journal, jh);
}
/*
@ -385,7 +385,7 @@ void journal_commit_transaction(journal_t *journal)
* frees some memory
*/
spin_lock(&journal->j_list_lock);
__journal_clean_checkpoint_list(journal);
__jbd2_journal_clean_checkpoint_list(journal);
spin_unlock(&journal->j_list_lock);
jbd_debug (3, "JBD: commit phase 1\n");
@ -393,7 +393,7 @@ void journal_commit_transaction(journal_t *journal)
/*
* Switch to a new revoke table.
*/
journal_switch_revoke_table(journal);
jbd2_journal_switch_revoke_table(journal);
commit_transaction->t_state = T_FLUSH;
journal->j_committing_transaction = commit_transaction;
@ -450,9 +450,9 @@ void journal_commit_transaction(journal_t *journal)
continue;
}
if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
__journal_unfile_buffer(jh);
__jbd2_journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
put_bh(bh);
} else {
jbd_unlock_bh_state(bh);
@ -463,9 +463,9 @@ void journal_commit_transaction(journal_t *journal)
spin_unlock(&journal->j_list_lock);
if (err)
__journal_abort_hard(journal);
__jbd2_journal_abort_hard(journal);
journal_write_revoke_records(journal, commit_transaction);
jbd2_journal_write_revoke_records(journal, commit_transaction);
jbd_debug(3, "JBD: commit phase 2\n");
@ -499,7 +499,7 @@ void journal_commit_transaction(journal_t *journal)
if (is_journal_aborted(journal)) {
JBUFFER_TRACE(jh, "journal is aborting: refile");
journal_refile_buffer(journal, jh);
jbd2_journal_refile_buffer(journal, jh);
/* If that was the last one, we need to clean up
* any descriptor buffers which may have been
* already allocated, even if we are now
@ -519,9 +519,9 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug(4, "JBD: get descriptor\n");
descriptor = journal_get_descriptor_buffer(journal);
descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor) {
__journal_abort_hard(journal);
__jbd2_journal_abort_hard(journal);
continue;
}
@ -529,8 +529,8 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug(4, "JBD: got buffer %llu (%p)\n",
(unsigned long long)bh->b_blocknr, bh->b_data);
header = (journal_header_t *)&bh->b_data[0];
header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
tagp = &bh->b_data[sizeof(journal_header_t)];
@ -543,25 +543,25 @@ void journal_commit_transaction(journal_t *journal)
/* Record it so that we can wait for IO
completion later */
BUFFER_TRACE(bh, "ph3: file as descriptor");
journal_file_buffer(descriptor, commit_transaction,
jbd2_journal_file_buffer(descriptor, commit_transaction,
BJ_LogCtl);
}
/* Where is the buffer to be written? */
err = journal_next_log_block(journal, &blocknr);
err = jbd2_journal_next_log_block(journal, &blocknr);
/* If the block mapping failed, just abandon the buffer
and repeat this loop: we'll fall into the
refile-on-abort condition above. */
if (err) {
__journal_abort_hard(journal);
__jbd2_journal_abort_hard(journal);
continue;
}
/*
* start_this_handle() uses t_outstanding_credits to determine
* the free space in the log, but this counter is changed
* by journal_next_log_block() also.
* by jbd2_journal_next_log_block() also.
*/
commit_transaction->t_outstanding_credits--;
@ -576,13 +576,13 @@ void journal_commit_transaction(journal_t *journal)
set_bit(BH_JWrite, &jh2bh(jh)->b_state);
/*
* akpm: journal_write_metadata_buffer() sets
* akpm: jbd2_journal_write_metadata_buffer() sets
* new_bh->b_transaction to commit_transaction.
* We need to clean this up before we release new_bh
* (which is of type BJ_IO)
*/
JBUFFER_TRACE(jh, "ph3: write metadata");
flags = journal_write_metadata_buffer(commit_transaction,
flags = jbd2_journal_write_metadata_buffer(commit_transaction,
jh, &new_jh, blocknr);
set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
wbuf[bufs++] = jh2bh(new_jh);
@ -592,9 +592,9 @@ void journal_commit_transaction(journal_t *journal)
tag_flag = 0;
if (flags & 1)
tag_flag |= JFS_FLAG_ESCAPE;
tag_flag |= JBD2_FLAG_ESCAPE;
if (!first_tag)
tag_flag |= JFS_FLAG_SAME_UUID;
tag_flag |= JBD2_FLAG_SAME_UUID;
tag = (journal_block_tag_t *) tagp;
tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
@ -622,7 +622,7 @@ void journal_commit_transaction(journal_t *journal)
submitting the IOs. "tag" still points to
the last tag we set up. */
tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
start_journal_io:
for (i = 0; i < bufs; i++) {
@ -678,14 +678,14 @@ wait_for_iobuf:
clear_buffer_jwrite(bh);
JBUFFER_TRACE(jh, "ph4: unfile after journal write");
journal_unfile_buffer(journal, jh);
jbd2_journal_unfile_buffer(journal, jh);
/*
* ->t_iobuf_list should contain only dummy buffer_heads
* which were created by journal_write_metadata_buffer().
* which were created by jbd2_journal_write_metadata_buffer().
*/
BUFFER_TRACE(bh, "dumping temporary bh");
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
__brelse(bh);
J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
free_buffer_head(bh);
@ -702,7 +702,7 @@ wait_for_iobuf:
we finally commit, we can do any checkpointing
required. */
JBUFFER_TRACE(jh, "file as BJ_Forget");
journal_file_buffer(jh, commit_transaction, BJ_Forget);
jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
/* Wake up any transactions which were waiting for this
IO to complete */
wake_up_bit(&bh->b_state, BH_Unshadow);
@ -733,8 +733,8 @@ wait_for_iobuf:
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
journal_unfile_buffer(journal, jh);
journal_put_journal_head(jh);
jbd2_journal_unfile_buffer(journal, jh);
jbd2_journal_put_journal_head(jh);
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@ -745,7 +745,7 @@ wait_for_iobuf:
err = -EIO;
if (err)
__journal_abort_hard(journal);
__jbd2_journal_abort_hard(journal);
/* End of a transaction! Finally, we can do checkpoint
processing: any buffers committed as a result of this
@ -789,14 +789,14 @@ restart_loop:
* Otherwise, we can just throw away the frozen data now.
*/
if (jh->b_committed_data) {
jbd_slab_free(jh->b_committed_data, bh->b_size);
jbd2_slab_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
if (jh->b_frozen_data) {
jh->b_committed_data = jh->b_frozen_data;
jh->b_frozen_data = NULL;
}
} else if (jh->b_frozen_data) {
jbd_slab_free(jh->b_frozen_data, bh->b_size);
jbd2_slab_free(jh->b_frozen_data, bh->b_size);
jh->b_frozen_data = NULL;
}
@ -804,12 +804,12 @@ restart_loop:
cp_transaction = jh->b_cp_transaction;
if (cp_transaction) {
JBUFFER_TRACE(jh, "remove from old cp transaction");
__journal_remove_checkpoint(jh);
__jbd2_journal_remove_checkpoint(jh);
}
/* Only re-checkpoint the buffer_head if it is marked
* dirty. If the buffer was added to the BJ_Forget list
* by journal_forget, it may no longer be dirty and
* by jbd2_journal_forget, it may no longer be dirty and
* there's no point in keeping a checkpoint record for
* it. */
@ -828,9 +828,9 @@ restart_loop:
if (buffer_jbddirty(bh)) {
JBUFFER_TRACE(jh, "add to new checkpointing trans");
__journal_insert_checkpoint(jh, commit_transaction);
__jbd2_journal_insert_checkpoint(jh, commit_transaction);
JBUFFER_TRACE(jh, "refile for checkpoint writeback");
__journal_refile_buffer(jh);
__jbd2_journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
@ -842,11 +842,11 @@ restart_loop:
* disk and before we process the buffer on BJ_Forget
* list. */
JBUFFER_TRACE(jh, "refile or unfile freed buffer");
__journal_refile_buffer(jh);
__jbd2_journal_refile_buffer(jh);
if (!jh->b_transaction) {
jbd_unlock_bh_state(bh);
/* needs a brelse */
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
release_buffer_page(bh);
} else
jbd_unlock_bh_state(bh);
@ -856,9 +856,9 @@ restart_loop:
spin_unlock(&journal->j_list_lock);
/*
* This is a bit sleazy. We borrow j_list_lock to protect
* journal->j_committing_transaction in __journal_remove_checkpoint.
* Really, __journal_remove_checkpoint should be using j_state_lock but
* it's a bit hassle to hold that across __journal_remove_checkpoint
* journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
* Really, __jbd2_journal_remove_checkpoint should be using j_state_lock but
* it's a bit hassle to hold that across __jbd2_journal_remove_checkpoint
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
@ -885,7 +885,7 @@ restart_loop:
spin_unlock(&journal->j_state_lock);
if (commit_transaction->t_checkpoint_list == NULL) {
__journal_drop_transaction(journal, commit_transaction);
__jbd2_journal_drop_transaction(journal, commit_transaction);
} else {
if (journal->j_checkpoint_transactions == NULL) {
journal->j_checkpoint_transactions = commit_transaction;

(File diff suppressed because it is too large.)


@ -18,7 +18,7 @@
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#endif
@ -86,7 +86,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
nbufs = 0;
for (next = start; next < max; next++) {
err = journal_bmap(journal, next, &blocknr);
err = jbd2_journal_bmap(journal, next, &blocknr);
if (err) {
printk (KERN_ERR "JBD: bad block at offset %u\n",
@ -142,7 +142,7 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
return -EIO;
}
err = journal_bmap(journal, offset, &blocknr);
err = jbd2_journal_bmap(journal, offset, &blocknr);
if (err) {
printk (KERN_ERR "JBD: bad block at offset %u\n",
@ -191,10 +191,10 @@ static int count_tags(struct buffer_head *bh, int size)
nr++;
tagp += sizeof(journal_block_tag_t);
if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
tagp += 16;
if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
break;
}
@ -210,7 +210,7 @@ do { \
} while (0)
/**
* journal_recover - recovers a on-disk journal
* jbd2_journal_recover - recovers a on-disk journal
* @journal: the journal to recover
*
* The primary function for recovering the log contents when mounting a
@ -221,7 +221,7 @@ do { \
* blocks. In the third and final pass, we replay any un-revoked blocks
* in the log.
*/
int journal_recover(journal_t *journal)
int jbd2_journal_recover(journal_t *journal)
{
int err;
journal_superblock_t * sb;
@ -260,13 +260,13 @@ int journal_recover(journal_t *journal)
* any existing commit records in the log. */
journal->j_transaction_sequence = ++info.end_transaction;
journal_clear_revoke(journal);
jbd2_journal_clear_revoke(journal);
sync_blockdev(journal->j_fs_dev);
return err;
}
/**
* journal_skip_recovery - Start journal and wipe exiting records
* jbd2_journal_skip_recovery - Start journal and wipe exiting records
* @journal: journal to startup
*
* Locate any valid recovery information from the journal and set up the
@ -278,7 +278,7 @@ int journal_recover(journal_t *journal)
* much recovery information is being erased, and to let us initialise
* the journal transaction sequence numbers to the next unused ID.
*/
int journal_skip_recovery(journal_t *journal)
int jbd2_journal_skip_recovery(journal_t *journal)
{
int err;
journal_superblock_t * sb;
@ -387,7 +387,7 @@ static int do_one_pass(journal_t *journal,
tmp = (journal_header_t *)bh->b_data;
if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
brelse(bh);
break;
}
@ -407,7 +407,7 @@ static int do_one_pass(journal_t *journal,
* to do with it? That depends on the pass... */
switch(blocktype) {
case JFS_DESCRIPTOR_BLOCK:
case JBD2_DESCRIPTOR_BLOCK:
/* If it is a valid descriptor block, replay it
* in pass REPLAY; otherwise, just skip over the
* blocks it describes. */
@ -451,7 +451,7 @@ static int do_one_pass(journal_t *journal,
/* If the block has been
* revoked, then we're all done
* here. */
if (journal_test_revoke
if (jbd2_journal_test_revoke
(journal, blocknr,
next_commit_ID)) {
brelse(obh);
@ -477,9 +477,9 @@ static int do_one_pass(journal_t *journal,
lock_buffer(nbh);
memcpy(nbh->b_data, obh->b_data,
journal->j_blocksize);
if (flags & JFS_FLAG_ESCAPE) {
if (flags & JBD2_FLAG_ESCAPE) {
*((__be32 *)bh->b_data) =
cpu_to_be32(JFS_MAGIC_NUMBER);
cpu_to_be32(JBD2_MAGIC_NUMBER);
}
BUFFER_TRACE(nbh, "marking dirty");
@ -495,17 +495,17 @@ static int do_one_pass(journal_t *journal,
skip_write:
tagp += sizeof(journal_block_tag_t);
if (!(flags & JFS_FLAG_SAME_UUID))
if (!(flags & JBD2_FLAG_SAME_UUID))
tagp += 16;
if (flags & JFS_FLAG_LAST_TAG)
if (flags & JBD2_FLAG_LAST_TAG)
break;
}
brelse(bh);
continue;
case JFS_COMMIT_BLOCK:
case JBD2_COMMIT_BLOCK:
/* Found an expected commit block: not much to
* do other than move on to the next sequence
* number. */
@ -513,7 +513,7 @@ static int do_one_pass(journal_t *journal,
next_commit_ID++;
continue;
case JFS_REVOKE_BLOCK:
case JBD2_REVOKE_BLOCK:
/* If we aren't in the REVOKE pass, then we can
* just skip over this block. */
if (pass != PASS_REVOKE) {
@ -570,11 +570,11 @@ static int do_one_pass(journal_t *journal,
static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
tid_t sequence, struct recovery_info *info)
{
journal_revoke_header_t *header;
jbd2_journal_revoke_header_t *header;
int offset, max;
header = (journal_revoke_header_t *) bh->b_data;
offset = sizeof(journal_revoke_header_t);
header = (jbd2_journal_revoke_header_t *) bh->b_data;
offset = sizeof(jbd2_journal_revoke_header_t);
max = be32_to_cpu(header->r_count);
while (offset < max) {
@ -583,7 +583,7 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
offset += 4;
err = journal_set_revoke(journal, blocknr, sequence);
err = jbd2_journal_set_revoke(journal, blocknr, sequence);
if (err)
return err;
++info->nr_revokes;


@ -62,7 +62,7 @@
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
@ -70,14 +70,14 @@
#include <linux/init.h>
#endif
static kmem_cache_t *revoke_record_cache;
static kmem_cache_t *revoke_table_cache;
static kmem_cache_t *jbd2_revoke_record_cache;
static kmem_cache_t *jbd2_revoke_table_cache;
/* Each revoke record represents one single revoked block. During
journal replay, this involves recording the transaction ID of the
last transaction to revoke this block. */
struct jbd_revoke_record_s
struct jbd2_revoke_record_s
{
struct list_head hash;
tid_t sequence; /* Used for recovery only */
@ -86,7 +86,7 @@ struct jbd_revoke_record_s
/* The revoke table is just a simple hash table of revoke records. */
struct jbd_revoke_table_s
struct jbd2_revoke_table_s
{
/* It is conceivable that we might want a larger hash table
* for recovery. Must be a power of two. */
@ -99,7 +99,7 @@ struct jbd_revoke_table_s
#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
struct journal_head **, int *,
struct jbd_revoke_record_s *);
struct jbd2_revoke_record_s *);
static void flush_descriptor(journal_t *, struct journal_head *, int);
#endif
@ -108,7 +108,7 @@ static void flush_descriptor(journal_t *, struct journal_head *, int);
/* Borrowed from buffer.c: this is a tried and tested block hash function */
static inline int hash(journal_t *journal, unsigned long block)
{
struct jbd_revoke_table_s *table = journal->j_revoke;
struct jbd2_revoke_table_s *table = journal->j_revoke;
int hash_shift = table->hash_shift;
return ((block << (hash_shift - 6)) ^
@ -120,10 +120,10 @@ static int insert_revoke_hash(journal_t *journal, unsigned long blocknr,
tid_t seq)
{
struct list_head *hash_list;
struct jbd_revoke_record_s *record;
struct jbd2_revoke_record_s *record;
repeat:
record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
if (!record)
goto oom;
@ -145,57 +145,57 @@ oom:
/* Find a revoke record in the journal's hash table. */
static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
unsigned long blocknr)
{
struct list_head *hash_list;
struct jbd_revoke_record_s *record;
struct jbd2_revoke_record_s *record;
hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
spin_lock(&journal->j_revoke_lock);
record = (struct jbd_revoke_record_s *) hash_list->next;
record = (struct jbd2_revoke_record_s *) hash_list->next;
while (&(record->hash) != hash_list) {
if (record->blocknr == blocknr) {
spin_unlock(&journal->j_revoke_lock);
return record;
}
record = (struct jbd_revoke_record_s *) record->hash.next;
record = (struct jbd2_revoke_record_s *) record->hash.next;
}
spin_unlock(&journal->j_revoke_lock);
return NULL;
}
int __init journal_init_revoke_caches(void)
int __init jbd2_journal_init_revoke_caches(void)
{
revoke_record_cache = kmem_cache_create("revoke_record",
sizeof(struct jbd_revoke_record_s),
jbd2_revoke_record_cache = kmem_cache_create("revoke_record",
sizeof(struct jbd2_revoke_record_s),
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
if (revoke_record_cache == 0)
if (jbd2_revoke_record_cache == 0)
return -ENOMEM;
revoke_table_cache = kmem_cache_create("revoke_table",
sizeof(struct jbd_revoke_table_s),
jbd2_revoke_table_cache = kmem_cache_create("revoke_table",
sizeof(struct jbd2_revoke_table_s),
0, 0, NULL, NULL);
if (revoke_table_cache == 0) {
kmem_cache_destroy(revoke_record_cache);
revoke_record_cache = NULL;
if (jbd2_revoke_table_cache == 0) {
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
return -ENOMEM;
}
return 0;
}
void journal_destroy_revoke_caches(void)
void jbd2_journal_destroy_revoke_caches(void)
{
kmem_cache_destroy(revoke_record_cache);
revoke_record_cache = NULL;
kmem_cache_destroy(revoke_table_cache);
revoke_table_cache = NULL;
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
kmem_cache_destroy(jbd2_revoke_table_cache);
jbd2_revoke_table_cache = NULL;
}
/* Initialise the revoke table for a given journal to a given size. */
int journal_init_revoke(journal_t *journal, int hash_size)
int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
{
int shift, tmp;
@ -206,7 +206,7 @@ int journal_init_revoke(journal_t *journal, int hash_size)
while((tmp >>= 1UL) != 0UL)
shift++;
journal->j_revoke_table[0] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!journal->j_revoke_table[0])
return -ENOMEM;
journal->j_revoke = journal->j_revoke_table[0];
@ -221,7 +221,7 @@ int journal_init_revoke(journal_t *journal, int hash_size)
journal->j_revoke->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!journal->j_revoke->hash_table) {
kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
journal->j_revoke = NULL;
return -ENOMEM;
}
@ -229,10 +229,10 @@ int journal_init_revoke(journal_t *journal, int hash_size)
for (tmp = 0; tmp < hash_size; tmp++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
journal->j_revoke_table[1] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!journal->j_revoke_table[1]) {
kfree(journal->j_revoke_table[0]->hash_table);
kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
return -ENOMEM;
}
@ -249,8 +249,8 @@ int journal_init_revoke(journal_t *journal, int hash_size)
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!journal->j_revoke->hash_table) {
kfree(journal->j_revoke_table[0]->hash_table);
kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
kmem_cache_free(revoke_table_cache, journal->j_revoke_table[1]);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
journal->j_revoke = NULL;
return -ENOMEM;
}
@ -265,9 +265,9 @@ int journal_init_revoke(journal_t *journal, int hash_size)
/* Destoy a journal's revoke table. The table must already be empty! */
void journal_destroy_revoke(journal_t *journal)
void jbd2_journal_destroy_revoke(journal_t *journal)
{
struct jbd_revoke_table_s *table;
struct jbd2_revoke_table_s *table;
struct list_head *hash_list;
int i;
@ -281,7 +281,7 @@ void journal_destroy_revoke(journal_t *journal)
}
kfree(table->hash_table);
kmem_cache_free(revoke_table_cache, table);
kmem_cache_free(jbd2_revoke_table_cache, table);
journal->j_revoke = NULL;
table = journal->j_revoke_table[1];
@ -294,7 +294,7 @@ void journal_destroy_revoke(journal_t *journal)
}
kfree(table->hash_table);
kmem_cache_free(revoke_table_cache, table);
kmem_cache_free(jbd2_revoke_table_cache, table);
journal->j_revoke = NULL;
}
@ -302,7 +302,7 @@ void journal_destroy_revoke(journal_t *journal)
#ifdef __KERNEL__
/*
* journal_revoke: revoke a given buffer_head from the journal. This
* jbd2_journal_revoke: revoke a given buffer_head from the journal. This
* prevents the block from being replayed during recovery if we take a
* crash after this current transaction commits. Any subsequent
* metadata writes of the buffer in this transaction cancel the
@ -314,18 +314,18 @@ void journal_destroy_revoke(journal_t *journal)
* revoke before clearing the block bitmap when we are deleting
* metadata.
*
* Revoke performs a journal_forget on any buffer_head passed in as a
* Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
* parameter, but does _not_ forget the buffer_head if the bh was only
* found implicitly.
*
* bh_in may not be a journalled buffer - it may have come off
* the hash tables without an attached journal_head.
*
* If bh_in is non-zero, journal_revoke() will decrement its b_count
* If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
* by one.
*/
int journal_revoke(handle_t *handle, unsigned long blocknr,
int jbd2_journal_revoke(handle_t *handle, unsigned long blocknr,
struct buffer_head *bh_in)
{
struct buffer_head *bh = NULL;
@ -338,7 +338,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
BUFFER_TRACE(bh_in, "enter");
journal = handle->h_transaction->t_journal;
if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
J_ASSERT (!"Cannot set revoke feature!");
return -EINVAL;
}
@ -386,8 +386,8 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
set_buffer_revoked(bh);
set_buffer_revokevalid(bh);
if (bh_in) {
BUFFER_TRACE(bh_in, "call journal_forget");
journal_forget(handle, bh_in);
BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
jbd2_journal_forget(handle, bh_in);
} else {
BUFFER_TRACE(bh, "call brelse");
__brelse(bh);
@ -403,7 +403,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
/*
* Cancel an outstanding revoke. For use only internally by the
* journaling code (called from journal_get_write_access).
* journaling code (called from jbd2_journal_get_write_access).
*
* We trust buffer_revoked() on the buffer if the buffer is already
* being journaled: if there is no revoke pending on the buffer, then we
@ -418,9 +418,9 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
*
* The caller must have the journal locked.
*/
int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
struct jbd_revoke_record_s *record;
struct jbd2_revoke_record_s *record;
journal_t *journal = handle->h_transaction->t_journal;
int need_cancel;
int did_revoke = 0; /* akpm: debug */
@ -447,7 +447,7 @@ int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
spin_lock(&journal->j_revoke_lock);
list_del(&record->hash);
spin_unlock(&journal->j_revoke_lock);
kmem_cache_free(revoke_record_cache, record);
kmem_cache_free(jbd2_revoke_record_cache, record);
did_revoke = 1;
}
}
@ -478,7 +478,7 @@ int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
* we do not want to suspend any processing until all revokes are
* written -bzzz
*/
void journal_switch_revoke_table(journal_t *journal)
void jbd2_journal_switch_revoke_table(journal_t *journal)
{
int i;
@ -498,12 +498,12 @@ void journal_switch_revoke_table(journal_t *journal)
* Called with the journal lock held.
*/
void journal_write_revoke_records(journal_t *journal,
void jbd2_journal_write_revoke_records(journal_t *journal,
transaction_t *transaction)
{
struct journal_head *descriptor;
struct jbd_revoke_record_s *record;
struct jbd_revoke_table_s *revoke;
struct jbd2_revoke_record_s *record;
struct jbd2_revoke_table_s *revoke;
struct list_head *hash_list;
int i, offset, count;
@ -519,14 +519,14 @@ void journal_write_revoke_records(journal_t *journal,
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
record = (struct jbd_revoke_record_s *)
record = (struct jbd2_revoke_record_s *)
hash_list->next;
write_one_revoke_record(journal, transaction,
&descriptor, &offset,
record);
count++;
list_del(&record->hash);
kmem_cache_free(revoke_record_cache, record);
kmem_cache_free(jbd2_revoke_record_cache, record);
}
}
if (descriptor)
@ -543,7 +543,7 @@ static void write_one_revoke_record(journal_t *journal,
transaction_t *transaction,
struct journal_head **descriptorp,
int *offsetp,
struct jbd_revoke_record_s *record)
struct jbd2_revoke_record_s *record)
{
struct journal_head *descriptor;
int offset;
@ -551,7 +551,7 @@ static void write_one_revoke_record(journal_t *journal,
/* If we are already aborting, this all becomes a noop. We
still need to go round the loop in
journal_write_revoke_records in order to free all of the
jbd2_journal_write_revoke_records in order to free all of the
revoke records: only the IO to the journal is omitted. */
if (is_journal_aborted(journal))
return;
@ -568,19 +568,19 @@ static void write_one_revoke_record(journal_t *journal,
}
if (!descriptor) {
descriptor = journal_get_descriptor_buffer(journal);
descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor)
return;
header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
header->h_sequence = cpu_to_be32(transaction->t_tid);
/* Record it so that we can wait for IO completion later */
JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
journal_file_buffer(descriptor, transaction, BJ_LogCtl);
jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
offset = sizeof(journal_revoke_header_t);
offset = sizeof(jbd2_journal_revoke_header_t);
*descriptorp = descriptor;
}
@ -601,7 +601,7 @@ static void flush_descriptor(journal_t *journal,
struct journal_head *descriptor,
int offset)
{
journal_revoke_header_t *header;
jbd2_journal_revoke_header_t *header;
struct buffer_head *bh = jh2bh(descriptor);
if (is_journal_aborted(journal)) {
@ -609,7 +609,7 @@ static void flush_descriptor(journal_t *journal,
return;
}
header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
header->r_count = cpu_to_be32(offset);
set_buffer_jwrite(bh);
BUFFER_TRACE(bh, "write");
@ -640,11 +640,11 @@ static void flush_descriptor(journal_t *journal,
* single block.
*/
int journal_set_revoke(journal_t *journal,
int jbd2_journal_set_revoke(journal_t *journal,
unsigned long blocknr,
tid_t sequence)
{
struct jbd_revoke_record_s *record;
struct jbd2_revoke_record_s *record;
record = find_revoke_record(journal, blocknr);
if (record) {
@ -664,11 +664,11 @@ int journal_set_revoke(journal_t *journal,
* ones, but later transactions still need replayed.
*/
int journal_test_revoke(journal_t *journal,
int jbd2_journal_test_revoke(journal_t *journal,
unsigned long blocknr,
tid_t sequence)
{
struct jbd_revoke_record_s *record;
struct jbd2_revoke_record_s *record;
record = find_revoke_record(journal, blocknr);
if (!record)
@ -683,21 +683,21 @@ int journal_test_revoke(journal_t *journal,
* that it can be reused by the running filesystem.
*/
void journal_clear_revoke(journal_t *journal)
void jbd2_journal_clear_revoke(journal_t *journal)
{
int i;
struct list_head *hash_list;
struct jbd_revoke_record_s *record;
struct jbd_revoke_table_s *revoke;
struct jbd2_revoke_record_s *record;
struct jbd2_revoke_table_s *revoke;
revoke = journal->j_revoke;
for (i = 0; i < revoke->hash_size; i++) {
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
record = (struct jbd_revoke_record_s*) hash_list->next;
record = (struct jbd2_revoke_record_s*) hash_list->next;
list_del(&record->hash);
kmem_cache_free(revoke_record_cache, record);
kmem_cache_free(jbd2_revoke_record_cache, record);
}
}
}
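As with the other entry points, the revoke interface only changes name. The following sketch is purely illustrative and not part of this patch: a filesystem freeing a metadata block revokes it so that recovery cannot replay stale contents over it. The helper is hypothetical; the jbd2_journal_revoke() signature and the forget-on-bh behaviour are the ones shown in the diff above.

/*
 * Hypothetical caller, not taken from this commit: revoke a metadata block
 * that is being freed. Passing bh makes jbd2_journal_revoke() also perform
 * the jbd2_journal_forget() for us, as described in the comment above.
 * Before this patch the call was journal_revoke().
 */
#include <linux/jbd2.h>
#include <linux/buffer_head.h>

static int example_free_metadata_block(handle_t *handle,
					unsigned long blocknr,
					struct buffer_head *bh)
{
	/* The revoke must be recorded before the block can be reused. */
	return jbd2_journal_revoke(handle, blocknr, bh);
}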


@ -19,7 +19,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
@ -28,7 +28,7 @@
#include <linux/highmem.h>
/*
* get_transaction: obtain a new transaction_t object.
* jbd2_get_transaction: obtain a new transaction_t object.
*
* Simply allocate and initialise a new transaction. Create it in
* RUNNING state and add it to the current journal (which should not
@ -44,7 +44,7 @@
*/
static transaction_t *
get_transaction(journal_t *journal, transaction_t *transaction)
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
transaction->t_journal = journal;
transaction->t_state = T_RUNNING;
@ -115,7 +115,7 @@ repeat:
spin_lock(&journal->j_state_lock);
repeat_locked:
if (is_journal_aborted(journal) ||
(journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
(journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
spin_unlock(&journal->j_state_lock);
ret = -EROFS;
goto out;
@ -134,7 +134,7 @@ repeat_locked:
spin_unlock(&journal->j_state_lock);
goto alloc_transaction;
}
get_transaction(journal, new_transaction);
jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
@ -175,7 +175,7 @@ repeat_locked:
spin_unlock(&transaction->t_handle_lock);
prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
__log_start_commit(journal, transaction->t_tid);
__jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
@ -205,12 +205,12 @@ repeat_locked:
* committing_transaction->t_outstanding_credits plus "enough" for
* the log control blocks.
* Also, this test is inconsitent with the matching one in
* journal_extend().
* jbd2_journal_extend().
*/
if (__log_space_left(journal) < jbd_space_needed(journal)) {
if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
spin_unlock(&transaction->t_handle_lock);
__log_wait_for_space(journal);
__jbd2_log_wait_for_space(journal);
goto repeat_locked;
}
@ -223,7 +223,7 @@ repeat_locked:
transaction->t_handle_count++;
jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
handle, nblocks, transaction->t_outstanding_credits,
__log_space_left(journal));
__jbd2_log_space_left(journal));
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
out:
@ -246,7 +246,7 @@ static handle_t *new_handle(int nblocks)
}
/**
* handle_t *journal_start() - Obtain a new handle.
* handle_t *jbd2_journal_start() - Obtain a new handle.
* @journal: Journal to start transaction on.
* @nblocks: number of block buffer we might modify
*
@ -259,7 +259,7 @@ static handle_t *new_handle(int nblocks)
*
* Return a pointer to a newly allocated handle, or NULL on failure
*/
handle_t *journal_start(journal_t *journal, int nblocks)
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
handle_t *handle = journal_current_handle();
int err;
@ -289,7 +289,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
}
/**
* int journal_extend() - extend buffer credits.
* int jbd2_journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
* @nblocks: nr blocks to try to extend by.
*
@ -298,7 +298,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
* a credit for a number of buffer modications in advance, but can
* extend its credit if it needs more.
*
* journal_extend tries to give the running handle more buffer credits.
* jbd2_journal_extend tries to give the running handle more buffer credits.
* It does not guarantee that allocation - this is a best-effort only.
* The calling process MUST be able to deal cleanly with a failure to
* extend here.
@ -308,7 +308,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
* return code < 0 implies an error
* return code > 0 implies normal transaction-full status.
*/
int journal_extend(handle_t *handle, int nblocks)
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@ -339,7 +339,7 @@ int journal_extend(handle_t *handle, int nblocks)
goto unlock;
}
if (wanted > __log_space_left(journal)) {
if (wanted > __jbd2_log_space_left(journal)) {
jbd_debug(3, "denied handle %p %d blocks: "
"insufficient log space\n", handle, nblocks);
goto unlock;
@ -360,21 +360,21 @@ out:
/**
* int journal_restart() - restart a handle .
* int jbd2_journal_restart() - restart a handle .
* @handle: handle to restart
* @nblocks: nr credits requested
*
* Restart a handle for a multi-transaction filesystem
* operation.
*
* If the journal_extend() call above fails to grant new buffer credits
* to a running handle, a call to journal_restart will commit the
* If the jbd2_journal_extend() call above fails to grant new buffer credits
* to a running handle, a call to jbd2_journal_restart will commit the
* handle's transaction so far and reattach the handle to a new
* transaction capabable of guaranteeing the requested number of
* credits.
*/
int journal_restart(handle_t *handle, int nblocks)
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@ -402,7 +402,7 @@ int journal_restart(handle_t *handle, int nblocks)
spin_unlock(&transaction->t_handle_lock);
jbd_debug(2, "restarting handle %p\n", handle);
__log_start_commit(journal, transaction->t_tid);
__jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
handle->h_buffer_credits = nblocks;
@ -412,7 +412,7 @@ int journal_restart(handle_t *handle, int nblocks)
/**
* void journal_lock_updates () - establish a transaction barrier.
* void jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
*
* This locks out any further updates from being started, and blocks
@ -421,7 +421,7 @@ int journal_restart(handle_t *handle, int nblocks)
*
* The journal lock should not be held on entry.
*/
void journal_lock_updates(journal_t *journal)
void jbd2_journal_lock_updates(journal_t *journal)
{
DEFINE_WAIT(wait);
@ -452,7 +452,7 @@ void journal_lock_updates(journal_t *journal)
/*
* We have now established a barrier against other normal updates, but
* we also need to barrier against other journal_lock_updates() calls
* we also need to barrier against other jbd2_journal_lock_updates() calls
* to make sure that we serialise special journal-locked operations
* too.
*/
@ -460,14 +460,14 @@ void journal_lock_updates(journal_t *journal)
}
/**
* void journal_unlock_updates (journal_t* journal) - release barrier
* void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
* @journal: Journal to release the barrier on.
*
* Release a transaction barrier obtained with journal_lock_updates().
* Release a transaction barrier obtained with jbd2_journal_lock_updates().
*
* Should be called without the journal lock held.
*/
void journal_unlock_updates (journal_t *journal)
void jbd2_journal_unlock_updates (journal_t *journal)
{
J_ASSERT(journal->j_barrier_count != 0);
@ -667,7 +667,7 @@ repeat:
JBUFFER_TRACE(jh, "allocate memory for buffer");
jbd_unlock_bh_state(bh);
frozen_buffer =
jbd_slab_alloc(jh2bh(jh)->b_size,
jbd2_slab_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
printk(KERN_EMERG
@ -699,7 +699,7 @@ repeat:
jh->b_transaction = transaction;
JBUFFER_TRACE(jh, "file as BJ_Reserved");
spin_lock(&journal->j_list_lock);
__journal_file_buffer(jh, transaction, BJ_Reserved);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
spin_unlock(&journal->j_list_lock);
}
@ -723,18 +723,18 @@ done:
* If we are about to journal a buffer, then any revoke pending on it is
* no longer valid
*/
journal_cancel_revoke(handle, jh);
jbd2_journal_cancel_revoke(handle, jh);
out:
if (unlikely(frozen_buffer)) /* It's usually NULL */
jbd_slab_free(frozen_buffer, bh->b_size);
jbd2_slab_free(frozen_buffer, bh->b_size);
JBUFFER_TRACE(jh, "exit");
return error;
}
/**
* int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
* int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
* @handle: transaction to add buffer modifications to
* @bh: bh to be used for metadata writes
* @credits: variable that will receive credits for the buffer
@ -745,16 +745,16 @@ out:
* because we're write()ing a buffer which is also part of a shared mapping.
*/
int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
struct journal_head *jh = journal_add_journal_head(bh);
struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int rc;
/* We do not want to get caught playing with fields which the
* log thread also manipulates. Make sure that the buffer
* completes any outstanding IO before proceeding. */
rc = do_get_write_access(handle, jh, 0);
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
return rc;
}
@ -772,17 +772,17 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
* unlocked buffer beforehand. */
/**
* int journal_get_create_access () - notify intent to use newly created bh
* int jbd2_journal_get_create_access () - notify intent to use newly created bh
* @handle: transaction to new buffer to
* @bh: new buffer.
*
* Call this if you create a new bh.
*/
int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
struct journal_head *jh = journal_add_journal_head(bh);
struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int err;
jbd_debug(5, "journal_head %p\n", jh);
@ -812,7 +812,7 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == NULL) {
jh->b_transaction = transaction;
JBUFFER_TRACE(jh, "file as BJ_Reserved");
__journal_file_buffer(jh, transaction, BJ_Reserved);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
} else if (jh->b_transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "set next transaction");
jh->b_next_transaction = transaction;
@ -828,14 +828,14 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* which hits an assertion error.
*/
JBUFFER_TRACE(jh, "cancelling revoke");
journal_cancel_revoke(handle, jh);
journal_put_journal_head(jh);
jbd2_journal_cancel_revoke(handle, jh);
jbd2_journal_put_journal_head(jh);
out:
return err;
}
/**
* int journal_get_undo_access() - Notify intent to modify metadata with
* int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
* non-rewindable consequences
* @handle: transaction
* @bh: buffer to undo
@ -848,7 +848,7 @@ out:
* since if we overwrote that space we would make the delete
* un-rewindable in case of a crash.
*
* To deal with that, journal_get_undo_access requests write access to a
* To deal with that, jbd2_journal_get_undo_access requests write access to a
* buffer for parts of non-rewindable operations such as delete
* operations on the bitmaps. The journaling code must keep a copy of
* the buffer's contents prior to the undo_access call until such time
@ -861,10 +861,10 @@ out:
*
* Returns error number or 0 on success.
*/
int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
int err;
struct journal_head *jh = journal_add_journal_head(bh);
struct journal_head *jh = jbd2_journal_add_journal_head(bh);
char *committed_data = NULL;
JBUFFER_TRACE(jh, "entry");
@ -880,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
repeat:
if (!jh->b_committed_data) {
committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
printk(KERN_EMERG "%s: No memory for committed data\n",
__FUNCTION__);
@ -905,14 +905,14 @@ repeat:
}
jbd_unlock_bh_state(bh);
out:
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
if (unlikely(committed_data))
jbd_slab_free(committed_data, bh->b_size);
jbd2_slab_free(committed_data, bh->b_size);
return err;
}
/**
* int journal_dirty_data() - mark a buffer as containing dirty data which
* int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
* needs to be flushed before we can commit the
* current transaction.
* @handle: transaction
@ -923,10 +923,10 @@ out:
*
* Returns error number or 0 on success.
*
* journal_dirty_data() can be called via page_launder->ext3_writepage
* jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
* by kswapd.
*/
int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
journal_t *journal = handle->h_transaction->t_journal;
int need_brelse = 0;
@ -935,7 +935,7 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
if (is_handle_aborted(handle))
return 0;
jh = journal_add_journal_head(bh);
jh = jbd2_journal_add_journal_head(bh);
JBUFFER_TRACE(jh, "entry");
/*
@ -984,7 +984,7 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
* And while we're in that state, someone does a
* writepage() in an attempt to pageout the same area
* of the file via a shared mapping. At present that
* calls journal_dirty_data(), and we get right here.
* calls jbd2_journal_dirty_data(), and we get right here.
* It may be too late to journal the data. Simply
* falling through to the next test will suffice: the
* data will be dirty and wil be checkpointed. The
@ -1035,7 +1035,7 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
/* journal_clean_data_list() may have got there first */
if (jh->b_transaction != NULL) {
JBUFFER_TRACE(jh, "unfile from commit");
__journal_temp_unlink_buffer(jh);
__jbd2_journal_temp_unlink_buffer(jh);
/* It still points to the committing
* transaction; move it to this one so
* that the refile assert checks are
@ -1054,15 +1054,15 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
JBUFFER_TRACE(jh, "not on correct data list: unfile");
J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
__journal_temp_unlink_buffer(jh);
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = handle->h_transaction;
JBUFFER_TRACE(jh, "file as data");
__journal_file_buffer(jh, handle->h_transaction,
__jbd2_journal_file_buffer(jh, handle->h_transaction,
BJ_SyncData);
}
} else {
JBUFFER_TRACE(jh, "not on a transaction");
__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
}
no_journal:
spin_unlock(&journal->j_list_lock);
@ -1072,12 +1072,12 @@ no_journal:
__brelse(bh);
}
JBUFFER_TRACE(jh, "exit");
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
return 0;
}
/**
* int journal_dirty_metadata() - mark a buffer as containing dirty metadata
* int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
* @handle: transaction to add buffer to.
* @bh: buffer to mark
*
@ -1095,7 +1095,7 @@ no_journal:
* buffer: that only gets done when the old transaction finally
* completes its commit.
*/
int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@ -1156,7 +1156,7 @@ int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "file as BJ_Metadata");
spin_lock(&journal->j_list_lock);
__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
spin_unlock(&journal->j_list_lock);
out_unlock_bh:
jbd_unlock_bh_state(bh);
@ -1166,18 +1166,18 @@ out:
}
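The two renamed primitives above are almost always used as a pair. A hedged sketch of the standard sequence follows; the one-credit reservation and the helper name are assumptions, not part of the patch.

#include <linux/err.h>
#include <linux/jbd2.h>
#include <linux/buffer_head.h>

/* Illustrative metadata update: declare write intent, modify, mark dirty. */
static int example_update_metadata(journal_t *journal, struct buffer_head *bh)
{
        handle_t *handle;
        int err, stop_err;

        handle = jbd2_journal_start(journal, 1);        /* reserve credit for one buffer */
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
                /* ... modify bh->b_data under the handle ... */
                err = jbd2_journal_dirty_metadata(handle, bh);  /* files as BJ_Metadata */
        }

        stop_err = jbd2_journal_stop(handle);
        return err ? err : stop_err;
}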
/*
* journal_release_buffer: undo a get_write_access without any buffer
* jbd2_journal_release_buffer: undo a get_write_access without any buffer
* updates, if the update decided in the end that it didn't need access.
*
*/
void
journal_release_buffer(handle_t *handle, struct buffer_head *bh)
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
BUFFER_TRACE(bh, "entry");
}
/**
* void journal_forget() - bforget() for potentially-journaled buffers.
* void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
* @handle: transaction handle
* @bh: bh to 'forget'
*
@ -1193,7 +1193,7 @@ journal_release_buffer(handle_t *handle, struct buffer_head *bh)
* Allow this call even if the handle has aborted --- it may be part of
* the caller's cleanup after an abort.
*/
int journal_forget (handle_t *handle, struct buffer_head *bh)
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@ -1250,11 +1250,11 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
*/
if (jh->b_cp_transaction) {
__journal_temp_unlink_buffer(jh);
__journal_file_buffer(jh, transaction, BJ_Forget);
__jbd2_journal_temp_unlink_buffer(jh);
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
} else {
__journal_unfile_buffer(jh);
journal_remove_journal_head(bh);
__jbd2_journal_unfile_buffer(jh);
jbd2_journal_remove_journal_head(bh);
__brelse(bh);
if (!buffer_jbd(bh)) {
spin_unlock(&journal->j_list_lock);
@ -1292,7 +1292,7 @@ drop:
}
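A sketch of where jbd2_journal_forget() typically sits, loosely modelled on ext3's block-freeing helper; the decision logic and the helper name are assumptions for illustration only.

/* Sketch; assumes <linux/jbd2.h>. When a block is freed, journaled metadata
 * should be revoked so a stale copy in the log is never replayed; other
 * buffers can simply be forgotten. */
static int example_forget_block(handle_t *handle, int is_metadata,
                                unsigned long blocknr, struct buffer_head *bh)
{
        if (is_metadata)
                return jbd2_journal_revoke(handle, blocknr, bh);
        return jbd2_journal_forget(handle, bh);
}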
/**
* int journal_stop() - complete a transaction
* int jbd2_journal_stop() - complete a transaction
* @handle: transaction to complete.
*
* All done for a particular handle.
@ -1302,12 +1302,12 @@ drop:
* complication is that we need to start a commit operation if the
* filesystem is marked for synchronous update.
*
* journal_stop itself will not usually return an error, but it may
* jbd2_journal_stop itself will not usually return an error, but it may
* do so in unusual circumstances. In particular, expect it to
* return -EIO if a journal_abort has been executed since the
* return -EIO if a jbd2_journal_abort has been executed since the
* transaction began.
*/
int journal_stop(handle_t *handle)
int jbd2_journal_stop(handle_t *handle)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@ -1383,15 +1383,15 @@ int journal_stop(handle_t *handle)
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
__log_start_commit(journal, transaction->t_tid);
__jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
/*
* Special case: JFS_SYNC synchronous updates require us
* Special case: JBD2_SYNC synchronous updates require us
* to wait for the commit to complete.
*/
if (handle->h_sync && !(current->flags & PF_MEMALLOC))
err = log_wait_commit(journal, tid);
err = jbd2_log_wait_commit(journal, tid);
} else {
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
@ -1401,24 +1401,24 @@ int journal_stop(handle_t *handle)
return err;
}
/**int journal_force_commit() - force any uncommitted transactions
/**int jbd2_journal_force_commit() - force any uncommitted transactions
* @journal: journal to force
*
* For synchronous operations: force any uncommitted transactions
* to disk. May seem kludgy, but it reuses all the handle batching
* code in a very simple manner.
*/
int journal_force_commit(journal_t *journal)
int jbd2_journal_force_commit(journal_t *journal)
{
handle_t *handle;
int ret;
handle = journal_start(journal, 1);
handle = jbd2_journal_start(journal, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
} else {
handle->h_sync = 1;
ret = journal_stop(handle);
ret = jbd2_journal_stop(handle);
}
return ret;
}
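A hypothetical caller of the renamed jbd2_journal_force_commit(), sketched the way an ext4 sync path might reach it. The EXT4_SB()->s_journal access follows the EXT4_JOURNAL() macro that appears later in this patch; the helper name and the NULL check are assumptions.

#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>

/* Illustrative: force every uncommitted transaction of an ext4 filesystem
 * to disk, reusing the handle-batching path described above. */
static int example_ext4_force_commit(struct super_block *sb)
{
        journal_t *journal = EXT4_SB(sb)->s_journal;

        return journal ? jbd2_journal_force_commit(journal) : 0;
}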
@ -1486,7 +1486,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
*
* Called under j_list_lock. The journal may not be locked.
*/
void __journal_temp_unlink_buffer(struct journal_head *jh)
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
struct journal_head **list = NULL;
transaction_t *transaction;
@ -1538,23 +1538,23 @@ void __journal_temp_unlink_buffer(struct journal_head *jh)
mark_buffer_dirty(bh); /* Expose it to the VM */
}
void __journal_unfile_buffer(struct journal_head *jh)
void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
__journal_temp_unlink_buffer(jh);
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
}
void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
jbd_lock_bh_state(jh2bh(jh));
spin_lock(&journal->j_list_lock);
__journal_unfile_buffer(jh);
__jbd2_journal_unfile_buffer(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(jh2bh(jh));
}
/*
* Called from journal_try_to_free_buffers().
* Called from jbd2_journal_try_to_free_buffers().
*
* Called under jbd_lock_bh_state(bh)
*/
@ -1576,16 +1576,16 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
/* A written-back ordered data buffer */
JBUFFER_TRACE(jh, "release data");
__journal_unfile_buffer(jh);
journal_remove_journal_head(bh);
__jbd2_journal_unfile_buffer(jh);
jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
} else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
/* written-back checkpointed metadata buffer */
if (jh->b_jlist == BJ_None) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
__journal_remove_checkpoint(jh);
journal_remove_journal_head(bh);
__jbd2_journal_remove_checkpoint(jh);
jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
}
@ -1596,7 +1596,7 @@ out:
/**
* int journal_try_to_free_buffers() - try to free page buffers.
* int jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
* @page: to try and free
* @unused_gfp_mask: unused
@ -1613,13 +1613,13 @@ out:
*
* This complicates JBD locking somewhat. We aren't protected by the
* BKL here. We wish to remove the buffer from its committing or
* running transaction's ->t_datalist via __journal_unfile_buffer.
* running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
*
* This may *change* the value of transaction_t->t_datalist, so anyone
* who looks at t_datalist needs to lock against this function.
*
* Even worse, someone may be doing a journal_dirty_data on this
* buffer. So we need to lock against that. journal_dirty_data()
* Even worse, someone may be doing a jbd2_journal_dirty_data on this
* buffer. So we need to lock against that. jbd2_journal_dirty_data()
* will come out of the lock with the buffer dirty, which makes it
* ineligible for release here.
*
@ -1629,7 +1629,7 @@ out:
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*/
int journal_try_to_free_buffers(journal_t *journal,
int jbd2_journal_try_to_free_buffers(journal_t *journal,
struct page *page, gfp_t unused_gfp_mask)
{
struct buffer_head *head;
@ -1646,15 +1646,15 @@ int journal_try_to_free_buffers(journal_t *journal,
/*
* We take our own ref against the journal_head here to avoid
* having to add tons of locking around each instance of
* journal_remove_journal_head() and journal_put_journal_head().
* jbd2_journal_remove_journal_head() and jbd2_journal_put_journal_head().
*/
jh = journal_grab_journal_head(bh);
jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
continue;
jbd_lock_bh_state(bh);
__journal_try_to_free_buffer(journal, bh);
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
jbd_unlock_bh_state(bh);
if (buffer_jbd(bh))
goto busy;
@ -1681,23 +1681,23 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
int may_free = 1;
struct buffer_head *bh = jh2bh(jh);
__journal_unfile_buffer(jh);
__jbd2_journal_unfile_buffer(jh);
if (jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "on running+cp transaction");
__journal_file_buffer(jh, transaction, BJ_Forget);
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
clear_buffer_jbddirty(bh);
may_free = 0;
} else {
JBUFFER_TRACE(jh, "on running transaction");
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
return may_free;
}
/*
* journal_invalidatepage
* jbd2_journal_invalidatepage
*
* This code is tricky. It has a number of cases to deal with.
*
@ -1765,7 +1765,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
jh = journal_grab_journal_head(bh);
jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
goto zap_buffer_no_jh;
@ -1796,7 +1796,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
ret = __dispose_buffer(jh,
journal->j_running_transaction);
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
@ -1810,7 +1810,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
JBUFFER_TRACE(jh, "give to committing trans");
ret = __dispose_buffer(jh,
journal->j_committing_transaction);
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
@ -1844,7 +1844,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
journal->j_running_transaction);
jh->b_next_transaction = NULL;
}
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
@ -1861,7 +1861,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
}
zap_buffer:
journal_put_journal_head(jh);
jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
@ -1877,7 +1877,7 @@ zap_buffer_unlocked:
}
/**
* void journal_invalidatepage()
* void jbd2_journal_invalidatepage()
* @journal: journal to use for flush...
* @page: page to flush
* @offset: length of page to invalidate.
@ -1885,7 +1885,7 @@ zap_buffer_unlocked:
* Reap page buffers containing data after offset in page.
*
*/
void journal_invalidatepage(journal_t *journal,
void jbd2_journal_invalidatepage(journal_t *journal,
struct page *page,
unsigned long offset)
{
@ -1927,7 +1927,7 @@ void journal_invalidatepage(journal_t *journal,
/*
* File a buffer on the given transaction list.
*/
void __journal_file_buffer(struct journal_head *jh,
void __jbd2_journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
{
struct journal_head **list = NULL;
@ -1956,7 +1956,7 @@ void __journal_file_buffer(struct journal_head *jh,
}
if (jh->b_transaction)
__journal_temp_unlink_buffer(jh);
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = transaction;
switch (jlist) {
@ -1998,12 +1998,12 @@ void __journal_file_buffer(struct journal_head *jh,
set_buffer_jbddirty(bh);
}
void journal_file_buffer(struct journal_head *jh,
void jbd2_journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
{
jbd_lock_bh_state(jh2bh(jh));
spin_lock(&transaction->t_journal->j_list_lock);
__journal_file_buffer(jh, transaction, jlist);
__jbd2_journal_file_buffer(jh, transaction, jlist);
spin_unlock(&transaction->t_journal->j_list_lock);
jbd_unlock_bh_state(jh2bh(jh));
}
@ -2018,7 +2018,7 @@ void journal_file_buffer(struct journal_head *jh,
*
* Called under jbd_lock_bh_state(jh2bh(jh))
*/
void __journal_refile_buffer(struct journal_head *jh)
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
int was_dirty;
struct buffer_head *bh = jh2bh(jh);
@ -2029,7 +2029,7 @@ void __journal_refile_buffer(struct journal_head *jh)
/* If the buffer is now unused, just drop it. */
if (jh->b_next_transaction == NULL) {
__journal_unfile_buffer(jh);
__jbd2_journal_unfile_buffer(jh);
return;
}
@ -2039,10 +2039,10 @@ void __journal_refile_buffer(struct journal_head *jh)
*/
was_dirty = test_clear_buffer_jbddirty(bh);
__journal_temp_unlink_buffer(jh);
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
__journal_file_buffer(jh, jh->b_transaction,
__jbd2_journal_file_buffer(jh, jh->b_transaction,
was_dirty ? BJ_Metadata : BJ_Reserved);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
@ -2054,26 +2054,26 @@ void __journal_refile_buffer(struct journal_head *jh)
* For the unlocked version of this call, also make sure that any
* hanging journal_head is cleaned up if necessary.
*
* __journal_refile_buffer is usually called as part of a single locked
* __jbd2_journal_refile_buffer is usually called as part of a single locked
* operation on a buffer_head, in which the caller is probably going to
* be hooking the journal_head onto other lists. In that case it is up
* to the caller to remove the journal_head if necessary. For the
* unlocked journal_refile_buffer call, the caller isn't going to be
* unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
* doing anything else to the buffer so we need to do the cleanup
* ourselves to avoid a jh leak.
*
* *** The journal_head may be freed by this call! ***
*/
void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
__journal_refile_buffer(jh);
__jbd2_journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
jbd2_journal_remove_journal_head(bh);
spin_unlock(&journal->j_list_lock);
__brelse(bh);


@ -1,5 +1,5 @@
/*
* linux/include/linux/ext4_jbd.h
* linux/include/linux/ext4_jbd2.h
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
@ -16,7 +16,7 @@
#define _LINUX_EXT4_JBD_H
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal)
@ -116,7 +116,7 @@ static inline int
__ext4_journal_get_undo_access(const char *where, handle_t *handle,
struct buffer_head *bh)
{
int err = journal_get_undo_access(handle, bh);
int err = jbd2_journal_get_undo_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@ -126,7 +126,7 @@ static inline int
__ext4_journal_get_write_access(const char *where, handle_t *handle,
struct buffer_head *bh)
{
int err = journal_get_write_access(handle, bh);
int err = jbd2_journal_get_write_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@ -135,13 +135,13 @@ __ext4_journal_get_write_access(const char *where, handle_t *handle,
static inline void
ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
journal_release_buffer(handle, bh);
jbd2_journal_release_buffer(handle, bh);
}
static inline int
__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
{
int err = journal_forget(handle, bh);
int err = jbd2_journal_forget(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@ -151,7 +151,7 @@ static inline int
__ext4_journal_revoke(const char *where, handle_t *handle,
unsigned long blocknr, struct buffer_head *bh)
{
int err = journal_revoke(handle, blocknr, bh);
int err = jbd2_journal_revoke(handle, blocknr, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@ -161,7 +161,7 @@ static inline int
__ext4_journal_get_create_access(const char *where,
handle_t *handle, struct buffer_head *bh)
{
int err = journal_get_create_access(handle, bh);
int err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@ -171,7 +171,7 @@ static inline int
__ext4_journal_dirty_metadata(const char *where,
handle_t *handle, struct buffer_head *bh)
{
int err = journal_dirty_metadata(handle, bh);
int err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@ -211,22 +211,22 @@ static inline handle_t *ext4_journal_current_handle(void)
static inline int ext4_journal_extend(handle_t *handle, int nblocks)
{
return journal_extend(handle, nblocks);
return jbd2_journal_extend(handle, nblocks);
}
static inline int ext4_journal_restart(handle_t *handle, int nblocks)
{
return journal_restart(handle, nblocks);
return jbd2_journal_restart(handle, nblocks);
}
static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
return journal_blocks_per_page(inode);
return jbd2_journal_blocks_per_page(inode);
}
static inline int ext4_journal_force_commit(journal_t *journal)
{
return journal_force_commit(journal);
return jbd2_journal_force_commit(journal);
}
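Assumption, not shown in this hunk: as in the ext3 header, each __ext4_journal_* helper is presumably paired with a macro that records the calling function for the abort message, along these lines.

#define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__FUNCTION__, (handle), (bh))
#define ext4_journal_dirty_metadata(handle, bh) \
        __ext4_journal_dirty_metadata(__FUNCTION__, (handle), (bh))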
/* super.c */


@ -1,5 +1,5 @@
/*
* linux/include/linux/jbd.h
* linux/include/linux/jbd2.h
*
* Written by Stephen C. Tweedie <sct@redhat.com>
*
@ -19,7 +19,7 @@
/* Allow this file to be included directly into e2fsprogs */
#ifndef __KERNEL__
#include "jfs_compat.h"
#define JFS_DEBUG
#define JBD2_DEBUG
#define jfs_debug jbd_debug
#else
@ -57,11 +57,11 @@
* CONFIG_JBD_DEBUG is on.
*/
#define JBD_EXPENSIVE_CHECKING
extern int journal_enable_debug;
extern int jbd2_journal_enable_debug;
#define jbd_debug(n, f, a...) \
do { \
if ((n) <= journal_enable_debug) { \
if ((n) <= jbd2_journal_enable_debug) { \
printk (KERN_DEBUG "(%s, %d): %s: ", \
__FILE__, __LINE__, __FUNCTION__); \
printk (f, ## a); \
@ -71,16 +71,16 @@ extern int journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif
extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
extern void * jbd_slab_alloc(size_t size, gfp_t flags);
extern void jbd_slab_free(void *ptr, size_t size);
extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
extern void jbd2_slab_free(void *ptr, size_t size);
#define jbd_kmalloc(size, flags) \
__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
__jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \
__jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
__jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
#define JFS_MIN_JOURNAL_BLOCKS 1024
#define JBD2_MIN_JOURNAL_BLOCKS 1024
#ifdef __KERNEL__
@ -122,7 +122,7 @@ typedef struct journal_s journal_t; /* Journal control structure */
* Internal structures used by the logging mechanism:
*/
#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
/*
* On-disk structures
@ -132,11 +132,11 @@ typedef struct journal_s journal_t; /* Journal control structure */
* Descriptor block types:
*/
#define JFS_DESCRIPTOR_BLOCK 1
#define JFS_COMMIT_BLOCK 2
#define JFS_SUPERBLOCK_V1 3
#define JFS_SUPERBLOCK_V2 4
#define JFS_REVOKE_BLOCK 5
#define JBD2_DESCRIPTOR_BLOCK 1
#define JBD2_COMMIT_BLOCK 2
#define JBD2_SUPERBLOCK_V1 3
#define JBD2_SUPERBLOCK_V2 4
#define JBD2_REVOKE_BLOCK 5
/*
* Standard header for all descriptor blocks:
@ -162,18 +162,18 @@ typedef struct journal_block_tag_s
* The revoke descriptor: used on disk to describe a series of blocks to
* be revoked from the log
*/
typedef struct journal_revoke_header_s
typedef struct jbd2_journal_revoke_header_s
{
journal_header_t r_header;
__be32 r_count; /* Count of bytes used in the block */
} journal_revoke_header_t;
} jbd2_journal_revoke_header_t;
/* Definitions for the journal tag flags word: */
#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */
#define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */
#define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
/*
@ -196,7 +196,7 @@ typedef struct journal_superblock_s
__be32 s_start; /* blocknr of start of log */
/* 0x0020 */
/* Error value, as set by journal_abort(). */
/* Error value, as set by jbd2_journal_abort(). */
__be32 s_errno;
/* 0x0024 */
@ -224,22 +224,22 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
#define JFS_HAS_COMPAT_FEATURE(j,mask) \
#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
/* Features known to this kernel version: */
#define JFS_KNOWN_COMPAT_FEATURES 0
#define JFS_KNOWN_ROCOMPAT_FEATURES 0
#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
#define JBD2_KNOWN_COMPAT_FEATURES 0
#define JBD2_KNOWN_ROCOMPAT_FEATURES 0
#define JBD2_KNOWN_INCOMPAT_FEATURES JBD2_FEATURE_INCOMPAT_REVOKE
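A minimal sketch of how the renamed feature macros are meant to be used, assuming <linux/jbd2.h> is included; the helper name is illustrative.

/* Does this journal's superblock advertise revoke-record support? */
static inline int example_journal_has_revoke(journal_t *journal)
{
        return JBD2_HAS_INCOMPAT_FEATURE(journal,
                                         JBD2_FEATURE_INCOMPAT_REVOKE);
}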
#ifdef __KERNEL__
@ -359,7 +359,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
bit_spin_unlock(BH_JournalHead, &bh->b_state);
}
struct jbd_revoke_table_s;
struct jbd2_revoke_table_s;
/**
* struct handle_s - The handle_s type is the concrete type associated with
@ -445,7 +445,7 @@ struct transaction_s
/*
* Transaction's current state
* [no locking - only kjournald alters this]
* [no locking - only kjournald2 alters this]
* FIXME: needs barriers
* KLUDGE: [use j_state_lock]
*/
@ -621,7 +621,7 @@ struct transaction_s
* @j_revoke: The revoke table - maintains the list of revoked blocks in the
* current transaction.
* @j_revoke_table: alternate revoke tables for j_revoke
* @j_wbuf: array of buffer_heads for journal_commit_transaction
* @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
* @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
* number that will fit in j_blocksize
* @j_last_sync_writer: most recent pid which did a synchronous write
@ -805,11 +805,11 @@ struct journal_s
* current transaction. [j_revoke_lock]
*/
spinlock_t j_revoke_lock;
struct jbd_revoke_table_s *j_revoke;
struct jbd_revoke_table_s *j_revoke_table[2];
struct jbd2_revoke_table_s *j_revoke;
struct jbd2_revoke_table_s *j_revoke_table[2];
/*
* array of bhs for journal_commit_transaction
* array of bhs for jbd2_journal_commit_transaction
*/
struct buffer_head **j_wbuf;
int j_wbufsize;
@ -826,12 +826,12 @@ struct journal_s
/*
* Journal flag definitions
*/
#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
#define JFS_BARRIER 0x020 /* Use IDE barriers */
#define JBD2_UNMOUNT 0x001 /* Journal thread is being destroyed */
#define JBD2_ABORT 0x002 /* Journaling has been aborted for errors. */
#define JBD2_ACK_ERR 0x004 /* The errno in the sb has been acked */
#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
#define JBD2_BARRIER 0x020 /* Use IDE barriers */
/*
* Function declarations for the journaling transaction and buffer
@ -839,31 +839,31 @@ struct journal_s
*/
/* Filing buffers */
extern void __journal_temp_unlink_buffer(struct journal_head *jh);
extern void journal_unfile_buffer(journal_t *, struct journal_head *);
extern void __journal_unfile_buffer(struct journal_head *);
extern void __journal_refile_buffer(struct journal_head *);
extern void journal_refile_buffer(journal_t *, struct journal_head *);
extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_unfile_buffer(struct journal_head *);
extern void __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_free_buffer(struct journal_head *bh);
extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_clean_data_list(transaction_t *transaction);
/* Log buffer allocation */
extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
int journal_next_log_block(journal_t *, unsigned long *);
extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
int jbd2_journal_next_log_block(journal_t *, unsigned long *);
/* Commit management */
extern void journal_commit_transaction(journal_t *);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
int __journal_clean_checkpoint_list(journal_t *journal);
int __journal_remove_checkpoint(struct journal_head *);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
extern int
journal_write_metadata_buffer(transaction_t *transaction,
jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in,
struct journal_head **jh_out,
unsigned long blocknr);
@ -893,91 +893,91 @@ static inline handle_t *journal_current_handle(void)
* Register buffer modifications against the current transaction.
*/
extern handle_t *journal_start(journal_t *, int nblocks);
extern int journal_restart (handle_t *, int nblocks);
extern int journal_extend (handle_t *, int nblocks);
extern int journal_get_write_access(handle_t *, struct buffer_head *);
extern int journal_get_create_access (handle_t *, struct buffer_head *);
extern int journal_get_undo_access(handle_t *, struct buffer_head *);
extern int journal_dirty_data (handle_t *, struct buffer_head *);
extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void journal_release_buffer (handle_t *, struct buffer_head *);
extern int journal_forget (handle_t *, struct buffer_head *);
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
extern int jbd2_journal_restart (handle_t *, int nblocks);
extern int jbd2_journal_extend (handle_t *, int nblocks);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
extern void journal_invalidatepage(journal_t *,
extern void jbd2_journal_invalidatepage(journal_t *,
struct page *, unsigned long);
extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int journal_stop(handle_t *);
extern int journal_flush (journal_t *);
extern void journal_lock_updates (journal_t *);
extern void journal_unlock_updates (journal_t *);
extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int jbd2_journal_stop(handle_t *);
extern int jbd2_journal_flush (journal_t *);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
extern journal_t * journal_init_dev(struct block_device *bdev,
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
int start, int len, int bsize);
extern journal_t * journal_init_inode (struct inode *);
extern int journal_update_format (journal_t *);
extern int journal_check_used_features
extern journal_t * jbd2_journal_init_inode (struct inode *);
extern int jbd2_journal_update_format (journal_t *);
extern int jbd2_journal_check_used_features
(journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_check_available_features
extern int jbd2_journal_check_available_features
(journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_set_features
extern int jbd2_journal_set_features
(journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_create (journal_t *);
extern int journal_load (journal_t *journal);
extern void journal_destroy (journal_t *);
extern int journal_recover (journal_t *journal);
extern int journal_wipe (journal_t *, int);
extern int journal_skip_recovery (journal_t *);
extern void journal_update_superblock (journal_t *, int);
extern void __journal_abort_hard (journal_t *);
extern void journal_abort (journal_t *, int);
extern int journal_errno (journal_t *);
extern void journal_ack_err (journal_t *);
extern int journal_clear_err (journal_t *);
extern int journal_bmap(journal_t *, unsigned long, unsigned long *);
extern int journal_force_commit(journal_t *);
extern int jbd2_journal_create (journal_t *);
extern int jbd2_journal_load (journal_t *journal);
extern void jbd2_journal_destroy (journal_t *);
extern int jbd2_journal_recover (journal_t *journal);
extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_superblock (journal_t *, int);
extern void __jbd2_journal_abort_hard (journal_t *);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long *);
extern int jbd2_journal_force_commit(journal_t *);
/*
* journal_head management
*/
struct journal_head *journal_add_journal_head(struct buffer_head *bh);
struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
void journal_remove_journal_head(struct buffer_head *bh);
void journal_put_journal_head(struct journal_head *jh);
struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
void jbd2_journal_remove_journal_head(struct buffer_head *bh);
void jbd2_journal_put_journal_head(struct journal_head *jh);
/*
* handle management
*/
extern kmem_cache_t *jbd_handle_cache;
extern kmem_cache_t *jbd2_handle_cache;
static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
{
return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
}
static inline void jbd_free_handle(handle_t *handle)
{
kmem_cache_free(jbd_handle_cache, handle);
kmem_cache_free(jbd2_handle_cache, handle);
}
/* Primary revoke support */
#define JOURNAL_REVOKE_DEFAULT_HASH 256
extern int journal_init_revoke(journal_t *, int);
extern void journal_destroy_revoke_caches(void);
extern int journal_init_revoke_caches(void);
extern int jbd2_journal_init_revoke(journal_t *, int);
extern void jbd2_journal_destroy_revoke_caches(void);
extern int jbd2_journal_init_revoke_caches(void);
extern void journal_destroy_revoke(journal_t *);
extern int journal_revoke (handle_t *,
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *,
unsigned long, struct buffer_head *);
extern int journal_cancel_revoke(handle_t *, struct journal_head *);
extern void journal_write_revoke_records(journal_t *, transaction_t *);
extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
/* Recovery revoke support */
extern int journal_set_revoke(journal_t *, unsigned long, tid_t);
extern int journal_test_revoke(journal_t *, unsigned long, tid_t);
extern void journal_clear_revoke(journal_t *);
extern void journal_switch_revoke_table(journal_t *journal);
extern int jbd2_journal_set_revoke(journal_t *, unsigned long, tid_t);
extern int jbd2_journal_test_revoke(journal_t *, unsigned long, tid_t);
extern void jbd2_journal_clear_revoke(journal_t *);
extern void jbd2_journal_switch_revoke_table(journal_t *journal);
/*
* The log thread user interface:
@ -986,17 +986,17 @@ extern void journal_switch_revoke_table(journal_t *journal);
* transitions on demand.
*/
int __log_space_left(journal_t *); /* Called with journal locked */
int log_start_commit(journal_t *journal, tid_t tid);
int __log_start_commit(journal_t *journal, tid_t tid);
int journal_start_commit(journal_t *journal, tid_t *tid);
int journal_force_commit_nested(journal_t *journal);
int log_wait_commit(journal_t *journal, tid_t tid);
int log_do_checkpoint(journal_t *journal);
int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_journal_force_commit_nested(journal_t *journal);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
void __log_wait_for_space(journal_t *journal);
extern void __journal_drop_transaction(journal_t *, transaction_t *);
extern int cleanup_journal_tail(journal_t *);
void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
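A hedged sketch of the renamed log-thread interface, mirroring the pattern jbd2_journal_stop() uses earlier in this patch; the helper name and the caller-supplied tid are assumptions.

/* Sketch; assumes <linux/jbd2.h>. Request a commit of transaction `tid`
 * and sleep until kjournald2 has finished it. */
static int example_commit_and_wait(journal_t *journal, tid_t tid)
{
        jbd2_log_start_commit(journal, tid);    /* non-blocking request */
        return jbd2_log_wait_commit(journal, tid);
}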
/* Debugging code only: */
@ -1010,7 +1010,7 @@ do { \
/*
* is_journal_abort
*
* Simple test wrapper function to test the JFS_ABORT state flag. This
* Simple test wrapper function to test the JBD2_ABORT state flag. This
* bit, when set, indicates that we have had a fatal error somewhere,
* either inside the journaling layer or indicated to us by the client
* (eg. ext3), and that we should not commit any further
@ -1019,7 +1019,7 @@ do { \
static inline int is_journal_aborted(journal_t *journal)
{
return journal->j_flags & JFS_ABORT;
return journal->j_flags & JBD2_ABORT;
}
static inline int is_handle_aborted(handle_t *handle)
@ -1029,7 +1029,7 @@ static inline int is_handle_aborted(handle_t *handle)
return is_journal_aborted(handle->h_transaction->t_journal);
}
static inline void journal_abort_handle(handle_t *handle)
static inline void jbd2_journal_abort_handle(handle_t *handle)
{
handle->h_aborted = 1;
}
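One more hedged sketch showing how the abort helpers above combine; the helper name, the -EROFS return and the recovery policy are illustrative assumptions.

/* Sketch; assumes <linux/jbd2.h>. Back off once the journal has died and
 * poison the handle if our own update fails. */
static int example_guarded_update(handle_t *handle, struct buffer_head *bh)
{
        int err;

        if (is_handle_aborted(handle))
                return -EROFS;          /* journal or handle already aborted */

        err = jbd2_journal_get_write_access(handle, bh);
        if (err)
                jbd2_journal_abort_handle(handle);      /* later jbd2 calls see the aborted handle */
        return err;
}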
@ -1051,7 +1051,7 @@ static inline int tid_geq(tid_t x, tid_t y)
return (difference >= 0);
}
extern int journal_blocks_per_page(struct inode *inode);
extern int jbd2_journal_blocks_per_page(struct inode *inode);
/*
* Return the minimum number of blocks which must be free in the journal