-rw-r--r--  fs/jbd2/Makefile            |    4
-rw-r--r--  fs/jbd2/checkpoint.c        |   54
-rw-r--r--  fs/jbd2/commit.c            |  122
-rw-r--r--  fs/jbd2/journal.c           |  454
-rw-r--r--  fs/jbd2/recovery.c          |   46
-rw-r--r--  fs/jbd2/revoke.c            |  146
-rw-r--r--  fs/jbd2/transaction.c       |  244
-rw-r--r--  include/linux/ext4_jbd2.h   |   26
-rw-r--r--  include/linux/jbd2.h        |  248
9 files changed, 672 insertions(+), 672 deletions(-)
diff --git a/fs/jbd2/Makefile b/fs/jbd2/Makefile
index 54aca4868a3..802a3413872 100644
--- a/fs/jbd2/Makefile
+++ b/fs/jbd2/Makefile
@@ -2,6 +2,6 @@
# Makefile for the linux journaling routines.
#
-obj-$(CONFIG_JBD) += jbd.o
+obj-$(CONFIG_JBD2) += jbd2.o
-jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
+jbd2-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 0208cc7ac5d..68039fa9a56 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -19,7 +19,7 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -95,9 +95,9 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- ret = __journal_remove_checkpoint(jh) + 1;
+ ret = __jbd2_journal_remove_checkpoint(jh) + 1;
jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
__brelse(bh);
} else {
@@ -107,19 +107,19 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
}
/*
- * __log_wait_for_space: wait until there is space in the journal.
+ * __jbd2_log_wait_for_space: wait until there is space in the journal.
*
* Called under j-state_lock *only*. It will be unlocked if we have to wait
* for a checkpoint to free up some space in the log.
*/
-void __log_wait_for_space(journal_t *journal)
+void __jbd2_log_wait_for_space(journal_t *journal)
{
int nblocks;
assert_spin_locked(&journal->j_state_lock);
nblocks = jbd_space_needed(journal);
- while (__log_space_left(journal) < nblocks) {
- if (journal->j_flags & JFS_ABORT)
+ while (__jbd2_log_space_left(journal) < nblocks) {
+ if (journal->j_flags & JBD2_ABORT)
return;
spin_unlock(&journal->j_state_lock);
mutex_lock(&journal->j_checkpoint_mutex);
@@ -130,9 +130,9 @@ void __log_wait_for_space(journal_t *journal)
*/
spin_lock(&journal->j_state_lock);
nblocks = jbd_space_needed(journal);
- if (__log_space_left(journal) < nblocks) {
+ if (__jbd2_log_space_left(journal) < nblocks) {
spin_unlock(&journal->j_state_lock);
- log_do_checkpoint(journal);
+ jbd2_log_do_checkpoint(journal);
spin_lock(&journal->j_state_lock);
}
mutex_unlock(&journal->j_checkpoint_mutex);
@@ -198,9 +198,9 @@ restart:
* Now in whatever state the buffer currently is, we know that
* it has been written out and so we can drop it from the list
*/
- released = __journal_remove_checkpoint(jh);
+ released = __jbd2_journal_remove_checkpoint(jh);
jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
}
@@ -252,16 +252,16 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- log_start_commit(journal, tid);
- log_wait_commit(journal, tid);
+ jbd2_log_start_commit(journal, tid);
+ jbd2_log_wait_commit(journal, tid);
ret = 1;
} else if (!buffer_dirty(bh)) {
J_ASSERT_JH(jh, !buffer_jbddirty(bh));
BUFFER_TRACE(bh, "remove from checkpoint");
- __journal_remove_checkpoint(jh);
+ __jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
__brelse(bh);
ret = 1;
} else {
@@ -296,7 +296,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
*
* The journal should be locked before calling this function.
*/
-int log_do_checkpoint(journal_t *journal)
+int jbd2_log_do_checkpoint(journal_t *journal)
{
transaction_t *transaction;
tid_t this_tid;
@@ -309,7 +309,7 @@ int log_do_checkpoint(journal_t *journal)
* don't need checkpointing, just eliminate them from the
* journal straight away.
*/
- result = cleanup_journal_tail(journal);
+ result = jbd2_cleanup_journal_tail(journal);
jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
if (result <= 0)
return result;
@@ -374,7 +374,7 @@ restart:
}
out:
spin_unlock(&journal->j_list_lock);
- result = cleanup_journal_tail(journal);
+ result = jbd2_cleanup_journal_tail(journal);
if (result < 0)
return result;
return 0;
@@ -397,7 +397,7 @@ out:
* we have an abort error outstanding.
*/
-int cleanup_journal_tail(journal_t *journal)
+int jbd2_cleanup_journal_tail(journal_t *journal)
{
transaction_t * transaction;
tid_t first_tid;
@@ -452,8 +452,8 @@ int cleanup_journal_tail(journal_t *journal)
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
spin_unlock(&journal->j_state_lock);
- if (!(journal->j_flags & JFS_ABORT))
- journal_update_superblock(journal, 1);
+ if (!(journal->j_flags & JBD2_ABORT))
+ jbd2_journal_update_superblock(journal, 1);
return 0;
}
@@ -518,7 +518,7 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
* Returns number of buffers reaped (for debug)
*/
-int __journal_clean_checkpoint_list(journal_t *journal)
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
{
transaction_t *transaction, *last_transaction, *next_transaction;
int ret = 0;
@@ -578,7 +578,7 @@ out:
* This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
-int __journal_remove_checkpoint(struct journal_head *jh)
+int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
transaction_t *transaction;
journal_t *journal;
@@ -607,7 +607,7 @@ int __journal_remove_checkpoint(struct journal_head *jh)
* dropped!
*
* The locking here around j_committing_transaction is a bit sleazy.
- * See the comment at the end of journal_commit_transaction().
+ * See the comment at the end of jbd2_journal_commit_transaction().
*/
if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "belongs to committing transaction");
@@ -617,7 +617,7 @@ int __journal_remove_checkpoint(struct journal_head *jh)
/* OK, that was the last buffer for the transaction: we can now
safely remove this transaction from the log */
- __journal_drop_transaction(journal, transaction);
+ __jbd2_journal_drop_transaction(journal, transaction);
/* Just in case anybody was waiting for more transactions to be
checkpointed... */
@@ -636,7 +636,7 @@ out:
* Called with the journal locked.
* Called with j_list_lock held.
*/
-void __journal_insert_checkpoint(struct journal_head *jh,
+void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
transaction_t *transaction)
{
JBUFFER_TRACE(jh, "entry");
@@ -666,7 +666,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,
* Called with j_list_lock held.
*/
-void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
+void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
assert_spin_locked(&journal->j_list_lock);
if (transaction->t_cpnext) {
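
The checkpoint code above leans on the renamed commit primitives whenever a buffer still belongs to a live transaction: __process_buffer() starts a commit for that transaction's tid and then waits for it. A minimal caller-side sketch of that pattern, using only names visible in the hunks above (the helper itself is hypothetical and the usual jbd2 locking rules are assumed):

/* Hypothetical helper, for illustration only: force the transaction
 * identified by `tid' to commit, as __process_buffer() does above.
 * Call without j_list_lock or any buffer state lock held. */
static int force_commit(journal_t *journal, tid_t tid)
{
	jbd2_log_start_commit(journal, tid);		/* wake kjournald2 for this tid */
	return jbd2_log_wait_commit(journal, tid);	/* 0 on success, negative if the
							 * journal aborts in the meantime */
}
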
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 10be51290a2..b1a4eafc154 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -1,5 +1,5 @@
/*
- * linux/fs/jbd/commit.c
+ * linux/fs/jbd2/commit.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
@@ -15,7 +15,7 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -111,7 +111,7 @@ static int journal_write_commit_record(journal_t *journal,
if (is_journal_aborted(journal))
return 0;
- descriptor = journal_get_descriptor_buffer(journal);
+ descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor)
return 1;
@@ -120,14 +120,14 @@ static int journal_write_commit_record(journal_t *journal,
/* AKPM: buglet - add `i' to tmp! */
for (i = 0; i < bh->b_size; i += 512) {
journal_header_t *tmp = (journal_header_t*)bh->b_data;
- tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
- tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
+ tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+ tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
}
JBUFFER_TRACE(descriptor, "write commit block");
set_buffer_dirty(bh);
- if (journal->j_flags & JFS_BARRIER) {
+ if (journal->j_flags & JBD2_BARRIER) {
set_buffer_ordered(bh);
barrier_done = 1;
}
@@ -145,7 +145,7 @@ static int journal_write_commit_record(journal_t *journal,
"disabling barriers\n",
bdevname(journal->j_dev, b));
spin_lock(&journal->j_state_lock);
- journal->j_flags &= ~JFS_BARRIER;
+ journal->j_flags &= ~JBD2_BARRIER;
spin_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
@@ -155,7 +155,7 @@ static int journal_write_commit_record(journal_t *journal,
ret = sync_dirty_buffer(bh);
}
put_bh(bh); /* One for getblk() */
- journal_put_journal_head(descriptor);
+ jbd2_journal_put_journal_head(descriptor);
return (ret == -EIO);
}
@@ -239,7 +239,7 @@ write_out_data:
if (locked && test_clear_buffer_dirty(bh)) {
BUFFER_TRACE(bh, "needs writeout, adding to array");
wbuf[bufs++] = bh;
- __journal_file_buffer(jh, commit_transaction,
+ __jbd2_journal_file_buffer(jh, commit_transaction,
BJ_Locked);
jbd_unlock_bh_state(bh);
if (bufs == journal->j_wbufsize) {
@@ -251,13 +251,13 @@ write_out_data:
}
else {
BUFFER_TRACE(bh, "writeout complete: unfile");
- __journal_unfile_buffer(jh);
+ __jbd2_journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
if (locked)
unlock_buffer(bh);
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
/* Once for our safety reference, once for
- * journal_remove_journal_head() */
+ * jbd2_journal_remove_journal_head() */
put_bh(bh);
put_bh(bh);
}
@@ -272,12 +272,12 @@ write_out_data:
}
/*
- * journal_commit_transaction
+ * jbd2_journal_commit_transaction
*
* The primary function for committing a transaction to the log. This
* function is called by the journal thread to begin a complete commit.
*/
-void journal_commit_transaction(journal_t *journal)
+void jbd2_journal_commit_transaction(journal_t *journal)
{
transaction_t *commit_transaction;
struct journal_head *jh, *new_jh, *descriptor;
@@ -305,10 +305,10 @@ void journal_commit_transaction(journal_t *journal)
spin_unlock(&journal->j_list_lock);
#endif
- /* Do we need to erase the effects of a prior journal_flush? */
- if (journal->j_flags & JFS_FLUSHED) {
+ /* Do we need to erase the effects of a prior jbd2_journal_flush? */
+ if (journal->j_flags & JBD2_FLUSHED) {
jbd_debug(3, "super block updated\n");
- journal_update_superblock(journal, 1);
+ jbd2_journal_update_superblock(journal, 1);
} else {
jbd_debug(3, "superblock not updated\n");
}
@@ -350,7 +350,7 @@ void journal_commit_transaction(journal_t *journal)
* BJ_Reserved buffers. Note, it is _not_ permissible to assume
* that there are no such buffers: if a large filesystem
* operation like a truncate needs to split itself over multiple
- * transactions, then it may try to do a journal_restart() while
+ * transactions, then it may try to do a jbd2_journal_restart() while
* there are still BJ_Reserved buffers outstanding. These must
* be released cleanly from the current transaction.
*
@@ -358,25 +358,25 @@ void journal_commit_transaction(journal_t *journal)
* again before modifying the buffer in the new transaction, but
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
- * that multiple journal_get_write_access() calls to the same
+ * that multiple jbd2_journal_get_write_access() calls to the same
* buffer are perfectly permissable.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
JBUFFER_TRACE(jh, "reserved, unused: refile");
/*
- * A journal_get_undo_access()+journal_release_buffer() may
+ * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
* leave undo-committed data.
*/
if (jh->b_committed_data) {
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
- jbd_slab_free(jh->b_committed_data, bh->b_size);
+ jbd2_slab_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
- journal_refile_buffer(journal, jh);
+ jbd2_journal_refile_buffer(journal, jh);
}
/*
@@ -385,7 +385,7 @@ void journal_commit_transaction(journal_t *journal)
* frees some memory
*/
spin_lock(&journal->j_list_lock);
- __journal_clean_checkpoint_list(journal);
+ __jbd2_journal_clean_checkpoint_list(journal);
spin_unlock(&journal->j_list_lock);
jbd_debug (3, "JBD: commit phase 1\n");
@@ -393,7 +393,7 @@ void journal_commit_transaction(journal_t *journal)
/*
* Switch to a new revoke table.
*/
- journal_switch_revoke_table(journal);
+ jbd2_journal_switch_revoke_table(journal);
commit_transaction->t_state = T_FLUSH;
journal->j_committing_transaction = commit_transaction;
@@ -450,9 +450,9 @@ void journal_commit_transaction(journal_t *journal)
continue;
}
if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
- __journal_unfile_buffer(jh);
+ __jbd2_journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
put_bh(bh);
} else {
jbd_unlock_bh_state(bh);
@@ -463,9 +463,9 @@ void journal_commit_transaction(journal_t *journal)
spin_unlock(&journal->j_list_lock);
if (err)
- __journal_abort_hard(journal);
+ __jbd2_journal_abort_hard(journal);
- journal_write_revoke_records(journal, commit_transaction);
+ jbd2_journal_write_revoke_records(journal, commit_transaction);
jbd_debug(3, "JBD: commit phase 2\n");
@@ -499,7 +499,7 @@ void journal_commit_transaction(journal_t *journal)
if (is_journal_aborted(journal)) {
JBUFFER_TRACE(jh, "journal is aborting: refile");
- journal_refile_buffer(journal, jh);
+ jbd2_journal_refile_buffer(journal, jh);
/* If that was the last one, we need to clean up
* any descriptor buffers which may have been
* already allocated, even if we are now
@@ -519,9 +519,9 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug(4, "JBD: get descriptor\n");
- descriptor = journal_get_descriptor_buffer(journal);
+ descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor) {
- __journal_abort_hard(journal);
+ __jbd2_journal_abort_hard(journal);
continue;
}
@@ -529,8 +529,8 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug(4, "JBD: got buffer %llu (%p)\n",
(unsigned long long)bh->b_blocknr, bh->b_data);
header = (journal_header_t *)&bh->b_data[0];
- header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
- header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
+ header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+ header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
tagp = &bh->b_data[sizeof(journal_header_t)];
@@ -543,25 +543,25 @@ void journal_commit_transaction(journal_t *journal)
/* Record it so that we can wait for IO
completion later */
BUFFER_TRACE(bh, "ph3: file as descriptor");
- journal_file_buffer(descriptor, commit_transaction,
+ jbd2_journal_file_buffer(descriptor, commit_transaction,
BJ_LogCtl);
}
/* Where is the buffer to be written? */
- err = journal_next_log_block(journal, &blocknr);
+ err = jbd2_journal_next_log_block(journal, &blocknr);
/* If the block mapping failed, just abandon the buffer
and repeat this loop: we'll fall into the
refile-on-abort condition above. */
if (err) {
- __journal_abort_hard(journal);
+ __jbd2_journal_abort_hard(journal);
continue;
}
/*
* start_this_handle() uses t_outstanding_credits to determine
* the free space in the log, but this counter is changed
- * by journal_next_log_block() also.
+ * by jbd2_journal_next_log_block() also.
*/
commit_transaction->t_outstanding_credits--;
@@ -576,13 +576,13 @@ void journal_commit_transaction(journal_t *journal)
set_bit(BH_JWrite, &jh2bh(jh)->b_state);
/*
- * akpm: journal_write_metadata_buffer() sets
+ * akpm: jbd2_journal_write_metadata_buffer() sets
* new_bh->b_transaction to commit_transaction.
* We need to clean this up before we release new_bh
* (which is of type BJ_IO)
*/
JBUFFER_TRACE(jh, "ph3: write metadata");
- flags = journal_write_metadata_buffer(commit_transaction,
+ flags = jbd2_journal_write_metadata_buffer(commit_transaction,
jh, &new_jh, blocknr);
set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
wbuf[bufs++] = jh2bh(new_jh);
@@ -592,9 +592,9 @@ void journal_commit_transaction(journal_t *journal)
tag_flag = 0;
if (flags & 1)
- tag_flag |= JFS_FLAG_ESCAPE;
+ tag_flag |= JBD2_FLAG_ESCAPE;
if (!first_tag)
- tag_flag |= JFS_FLAG_SAME_UUID;
+ tag_flag |= JBD2_FLAG_SAME_UUID;
tag = (journal_block_tag_t *) tagp;
tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
@@ -622,7 +622,7 @@ void journal_commit_transaction(journal_t *journal)
submitting the IOs. "tag" still points to
the last tag we set up. */
- tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
+ tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
start_journal_io:
for (i = 0; i < bufs; i++) {
@@ -678,14 +678,14 @@ wait_for_iobuf:
clear_buffer_jwrite(bh);
JBUFFER_TRACE(jh, "ph4: unfile after journal write");
- journal_unfile_buffer(journal, jh);
+ jbd2_journal_unfile_buffer(journal, jh);
/*
* ->t_iobuf_list should contain only dummy buffer_heads
- * which were created by journal_write_metadata_buffer().
+ * which were created by jbd2_journal_write_metadata_buffer().
*/
BUFFER_TRACE(bh, "dumping temporary bh");
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
__brelse(bh);
J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
free_buffer_head(bh);
@@ -702,7 +702,7 @@ wait_for_iobuf:
we finally commit, we can do any checkpointing
required. */
JBUFFER_TRACE(jh, "file as BJ_Forget");
- journal_file_buffer(jh, commit_transaction, BJ_Forget);
+ jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
/* Wake up any transactions which were waiting for this
IO to complete */
wake_up_bit(&bh->b_state, BH_Unshadow);
@@ -733,8 +733,8 @@ wait_for_iobuf:
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
- journal_unfile_buffer(journal, jh);
- journal_put_journal_head(jh);
+ jbd2_journal_unfile_buffer(journal, jh);
+ jbd2_journal_put_journal_head(jh);
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -745,7 +745,7 @@ wait_for_iobuf:
err = -EIO;
if (err)
- __journal_abort_hard(journal);
+ __jbd2_journal_abort_hard(journal);
/* End of a transaction! Finally, we can do checkpoint
processing: any buffers committed as a result of this
@@ -789,14 +789,14 @@ restart_loop:
* Otherwise, we can just throw away the frozen data now.
*/
if (jh->b_committed_data) {
- jbd_slab_free(jh->b_committed_data, bh->b_size);
+ jbd2_slab_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
if (jh->b_frozen_data) {
jh->b_committed_data = jh->b_frozen_data;
jh->b_frozen_data = NULL;
}
} else if (jh->b_frozen_data) {
- jbd_slab_free(jh->b_frozen_data, bh->b_size);
+ jbd2_slab_free(jh->b_frozen_data, bh->b_size);
jh->b_frozen_data = NULL;
}
@@ -804,12 +804,12 @@ restart_loop:
cp_transaction = jh->b_cp_transaction;
if (cp_transaction) {
JBUFFER_TRACE(jh, "remove from old cp transaction");
- __journal_remove_checkpoint(jh);
+ __jbd2_journal_remove_checkpoint(jh);
}
/* Only re-checkpoint the buffer_head if it is marked
* dirty. If the buffer was added to the BJ_Forget list
- * by journal_forget, it may no longer be dirty and
+ * by jbd2_journal_forget, it may no longer be dirty and
* there's no point in keeping a checkpoint record for
* it. */
@@ -828,9 +828,9 @@ restart_loop:
if (buffer_jbddirty(bh)) {
JBUFFER_TRACE(jh, "add to new checkpointing trans");
- __journal_insert_checkpoint(jh, commit_transaction);
+ __jbd2_journal_insert_checkpoint(jh, commit_transaction);
JBUFFER_TRACE(jh, "refile for checkpoint writeback");
- __journal_refile_buffer(jh);
+ __jbd2_journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
@@ -842,11 +842,11 @@ restart_loop:
* disk and before we process the buffer on BJ_Forget
* list. */
JBUFFER_TRACE(jh, "refile or unfile freed buffer");
- __journal_refile_buffer(jh);
+ __jbd2_journal_refile_buffer(jh);
if (!jh->b_transaction) {
jbd_unlock_bh_state(bh);
/* needs a brelse */
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
release_buffer_page(bh);
} else
jbd_unlock_bh_state(bh);
@@ -856,9 +856,9 @@ restart_loop:
spin_unlock(&journal->j_list_lock);
/*
* This is a bit sleazy. We borrow j_list_lock to protect
- * journal->j_committing_transaction in __journal_remove_checkpoint.
- * Really, __journal_remove_checkpoint should be using j_state_lock but
- * it's a bit hassle to hold that across __journal_remove_checkpoint
+ * journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
+ * Really, __jbd2_journal_remove_checkpoint should be using j_state_lock but
+ * it's a bit hassle to hold that across __jbd2_journal_remove_checkpoint
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
@@ -885,7 +885,7 @@ restart_loop:
spin_unlock(&journal->j_state_lock);
if (commit_transaction->t_checkpoint_list == NULL) {
- __journal_drop_transaction(journal, commit_transaction);
+ __jbd2_journal_drop_transaction(journal, commit_transaction);
} else {
if (journal->j_checkpoint_transactions == NULL) {
journal->j_checkpoint_transactions = commit_transaction;
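
The descriptor-building loop above encodes three per-tag flags: JBD2_FLAG_ESCAPE when the data block begins with the journal magic number, JBD2_FLAG_SAME_UUID for every tag after the first, and JBD2_FLAG_LAST_TAG on the final tag before the I/O is submitted. A compressed sketch of that encoding (illustrative only; the helper and its signature are not part of the patch):

/* Illustration of the tag layout used by the commit path above.
 * journal_block_tag_t and the JBD2_FLAG_* constants come from
 * <linux/jbd2.h>; only the names have changed from the jbd originals. */
static void fill_tag(journal_block_tag_t *tag, struct buffer_head *bh,
		     int escaped, int first_tag, int last_tag)
{
	__u32 flags = 0;

	if (escaped)
		flags |= JBD2_FLAG_ESCAPE;	/* data began with JBD2_MAGIC_NUMBER */
	if (!first_tag)
		flags |= JBD2_FLAG_SAME_UUID;	/* reuse the UUID of the first tag */
	if (last_tag)
		flags |= JBD2_FLAG_LAST_TAG;	/* no further tags in this descriptor */

	tag->t_blocknr = cpu_to_be32(bh->b_blocknr);
	tag->t_flags = cpu_to_be32(flags);
}
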
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c518dd8fe60..3fbbba20a51 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1,5 +1,5 @@
/*
- * linux/fs/jbd/journal.c
+ * linux/fs/jbd2/journal.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
@@ -40,51 +40,51 @@
#include <asm/uaccess.h>
#include <asm/page.h>
-EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_restart);
-EXPORT_SYMBOL(journal_extend);
-EXPORT_SYMBOL(journal_stop);
-EXPORT_SYMBOL(journal_lock_updates);
-EXPORT_SYMBOL(journal_unlock_updates);
-EXPORT_SYMBOL(journal_get_write_access);
-EXPORT_SYMBOL(journal_get_create_access);
-EXPORT_SYMBOL(journal_get_undo_access);
-EXPORT_SYMBOL(journal_dirty_data);
-EXPORT_SYMBOL(journal_dirty_metadata);
-EXPORT_SYMBOL(journal_release_buffer);
-EXPORT_SYMBOL(journal_forget);
+EXPORT_SYMBOL(jbd2_journal_start);
+EXPORT_SYMBOL(jbd2_journal_restart);
+EXPORT_SYMBOL(jbd2_journal_extend);
+EXPORT_SYMBOL(jbd2_journal_stop);
+EXPORT_SYMBOL(jbd2_journal_lock_updates);
+EXPORT_SYMBOL(jbd2_journal_unlock_updates);
+EXPORT_SYMBOL(jbd2_journal_get_write_access);
+EXPORT_SYMBOL(jbd2_journal_get_create_access);
+EXPORT_SYMBOL(jbd2_journal_get_undo_access);
+EXPORT_SYMBOL(jbd2_journal_dirty_data);
+EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
+EXPORT_SYMBOL(jbd2_journal_release_buffer);
+EXPORT_SYMBOL(jbd2_journal_forget);
#if 0
EXPORT_SYMBOL(journal_sync_buffer);
#endif
-EXPORT_SYMBOL(journal_flush);
-EXPORT_SYMBOL(journal_revoke);
-
-EXPORT_SYMBOL(journal_init_dev);
-EXPORT_SYMBOL(journal_init_inode);
-EXPORT_SYMBOL(journal_update_format);
-EXPORT_SYMBOL(journal_check_used_features);
-EXPORT_SYMBOL(journal_check_available_features);
-EXPORT_SYMBOL(journal_set_features);
-EXPORT_SYMBOL(journal_create);
-EXPORT_SYMBOL(journal_load);
-EXPORT_SYMBOL(journal_destroy);
-EXPORT_SYMBOL(journal_update_superblock);
-EXPORT_SYMBOL(journal_abort);
-EXPORT_SYMBOL(journal_errno);
-EXPORT_SYMBOL(journal_ack_err);
-EXPORT_SYMBOL(journal_clear_err);
-EXPORT_SYMBOL(log_wait_commit);
-EXPORT_SYMBOL(journal_start_commit);
-EXPORT_SYMBOL(journal_force_commit_nested);
-EXPORT_SYMBOL(journal_wipe);
-EXPORT_SYMBOL(journal_blocks_per_page);
-EXPORT_SYMBOL(journal_invalidatepage);
-EXPORT_SYMBOL(journal_try_to_free_buffers);
-EXPORT_SYMBOL(journal_force_commit);
+EXPORT_SYMBOL(jbd2_journal_flush);
+EXPORT_SYMBOL(jbd2_journal_revoke);
+
+EXPORT_SYMBOL(jbd2_journal_init_dev);
+EXPORT_SYMBOL(jbd2_journal_init_inode);
+EXPORT_SYMBOL(jbd2_journal_update_format);
+EXPORT_SYMBOL(jbd2_journal_check_used_features);
+EXPORT_SYMBOL(jbd2_journal_check_available_features);
+EXPORT_SYMBOL(jbd2_journal_set_features);
+EXPORT_SYMBOL(jbd2_journal_create);
+EXPORT_SYMBOL(jbd2_journal_load);
+EXPORT_SYMBOL(jbd2_journal_destroy);
+EXPORT_SYMBOL(jbd2_journal_update_superblock);
+EXPORT_SYMBOL(jbd2_journal_abort);
+EXPORT_SYMBOL(jbd2_journal_errno);
+EXPORT_SYMBOL(jbd2_journal_ack_err);
+EXPORT_SYMBOL(jbd2_journal_clear_err);
+EXPORT_SYMBOL(jbd2_log_wait_commit);
+EXPORT_SYMBOL(jbd2_journal_start_commit);
+EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
+EXPORT_SYMBOL(jbd2_journal_wipe);
+EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
+EXPORT_SYMBOL(jbd2_journal_invalidatepage);
+EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
+EXPORT_SYMBOL(jbd2_journal_force_commit);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
+static int jbd2_journal_create_jbd_slab(size_t slab_size);
/*
* Helper function used to manage commit timeouts
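
The export list above is the entire client-facing surface of the rename: filesystems move from journal_* to jbd2_journal_* but keep the same handle-based calling convention. A minimal sketch of one metadata update under the renamed API (illustrative; the helper is hypothetical and the handle/credit semantics are assumed to be unchanged from jbd):

/* Hypothetical example of the renamed client API exported above. */
static int update_one_block(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err, rc;

	handle = jbd2_journal_start(journal, 1);	/* reserve one log block */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	rc = jbd2_journal_stop(handle);		/* -EIO if the journal has aborted */
	return err ? err : rc;
}
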
@@ -98,7 +98,7 @@ static void commit_timeout(unsigned long __data)
}
/*
- * kjournald: The main thread function used to manage a logging device
+ * kjournald2: The main thread function used to manage a logging device
* journal.
*
* This kernel thread is responsible for two things:
@@ -113,7 +113,7 @@ static void commit_timeout(unsigned long __data)
* known as checkpointing, and this thread is responsible for that job.
*/
-static int kjournald(void *arg)
+static int kjournald2(void *arg)
{
journal_t *journal = arg;
transaction_t *transaction;
@@ -129,7 +129,7 @@ static int kjournald(void *arg)
journal->j_task = current;
wake_up(&journal->j_wait_done_commit);
- printk(KERN_INFO "kjournald starting. Commit interval %ld seconds\n",
+ printk(KERN_INFO "kjournald2 starting. Commit interval %ld seconds\n",
journal->j_commit_interval / HZ);
/*
@@ -138,7 +138,7 @@ static int kjournald(void *arg)
spin_lock(&journal->j_state_lock);
loop:
- if (journal->j_flags & JFS_UNMOUNT)
+ if (journal->j_flags & JBD2_UNMOUNT)
goto end_loop;
jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
@@ -148,7 +148,7 @@ loop:
jbd_debug(1, "OK, requests differ\n");
spin_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
- journal_commit_transaction(journal);
+ jbd2_journal_commit_transaction(journal);
spin_lock(&journal->j_state_lock);
goto loop;
}
@@ -160,7 +160,7 @@ loop:
* good idea, because that depends on threads that may
* be already stopped.
*/
- jbd_debug(1, "Now suspending kjournald\n");
+ jbd_debug(1, "Now suspending kjournald2\n");
spin_unlock(&journal->j_state_lock);
refrigerator();
spin_lock(&journal->j_state_lock);
@@ -180,7 +180,7 @@ loop:
if (transaction && time_after_eq(jiffies,
transaction->t_expires))
should_sleep = 0;
- if (journal->j_flags & JFS_UNMOUNT)
+ if (journal->j_flags & JBD2_UNMOUNT)
should_sleep = 0;
if (should_sleep) {
spin_unlock(&journal->j_state_lock);
@@ -190,7 +190,7 @@ loop:
finish_wait(&journal->j_wait_commit, &wait);
}
- jbd_debug(1, "kjournald wakes\n");
+ jbd_debug(1, "kjournald2 wakes\n");
/*
* Were we woken up by a commit wakeup event?
@@ -211,16 +211,16 @@ end_loop:
return 0;
}
-static void journal_start_thread(journal_t *journal)
+static void jbd2_journal_start_thread(journal_t *journal)
{
- kthread_run(kjournald, journal, "kjournald");
+ kthread_run(kjournald2, journal, "kjournald2");
wait_event(journal->j_wait_done_commit, journal->j_task != 0);
}
static void journal_kill_thread(journal_t *journal)
{
spin_lock(&journal->j_state_lock);
- journal->j_flags |= JFS_UNMOUNT;
+ journal->j_flags |= JBD2_UNMOUNT;
while (journal->j_task) {
wake_up(&journal->j_wait_commit);
@@ -232,7 +232,7 @@ static void journal_kill_thread(journal_t *journal)
}
/*
- * journal_write_metadata_buffer: write a metadata buffer to the journal.
+ * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
*
* Writes a metadata buffer to a given disk block. The actual IO is not
* performed but a new buffer_head is constructed which labels the data
@@ -240,7 +240,7 @@ static void journal_kill_thread(journal_t *journal)
*
* Any magic-number escaping which needs to be done will cause a
* copy-out here. If the buffer happens to start with the
- * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
+ * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the
* magic number is only written to the log for descripter blocks. In
* this case, we copy the data and replace the first word with 0, and we
* return a result code which indicates that this buffer needs to be
@@ -268,7 +268,7 @@ static void journal_kill_thread(journal_t *journal)
* Bit 1 set == buffer copy-out performed (kfree the data after IO)
*/
-int journal_write_metadata_buffer(transaction_t *transaction,
+int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in,
struct journal_head **jh_out,
unsigned long blocknr)
@@ -316,7 +316,7 @@ repeat:
* Check for escaping
*/
if (*((__be32 *)(mapped_data + new_offset)) ==
- cpu_to_be32(JFS_MAGIC_NUMBER)) {
+ cpu_to_be32(JBD2_MAGIC_NUMBER)) {
need_copy_out = 1;
do_escape = 1;
}
@@ -329,10 +329,10 @@ repeat:
char *tmp;
jbd_unlock_bh_state(bh_in);
- tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+ tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
jbd_lock_bh_state(bh_in);
if (jh_in->b_frozen_data) {
- jbd_slab_free(tmp, bh_in->b_size);
+ jbd2_slab_free(tmp, bh_in->b_size);
goto repeat;
}
@@ -362,7 +362,7 @@ repeat:
atomic_set(&new_bh->b_count, 1);
jbd_unlock_bh_state(bh_in);
- new_jh = journal_add_journal_head(new_bh); /* This sleeps */
+ new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
set_bh_page(new_bh, new_page, new_offset);
new_jh->b_transaction = NULL;
@@ -380,9 +380,9 @@ repeat:
* copying is moved to the transaction's shadow queue.
*/
JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
- journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
JBUFFER_TRACE(new_jh, "file as BJ_IO");
- journal_file_buffer(new_jh, transaction, BJ_IO);
+ jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
return do_escape | (done_copy_out << 1);
}
@@ -393,14 +393,14 @@ repeat:
*/
/*
- * __log_space_left: Return the number of free blocks left in the journal.
+ * __jbd2_log_space_left: Return the number of free blocks left in the journal.
*
* Called with the journal already locked.
*
* Called under j_state_lock
*/
-int __log_space_left(journal_t *journal)
+int __jbd2_log_space_left(journal_t *journal)
{
int left = journal->j_free;
@@ -424,7 +424,7 @@ int __log_space_left(journal_t *journal)
/*
* Called under j_state_lock. Returns true if a transaction was started.
*/
-int __log_start_commit(journal_t *journal, tid_t target)
+int __jbd2_log_start_commit(journal_t *journal, tid_t target)
{
/*
* Are we already doing a recent enough commit?
@@ -445,12 +445,12 @@ int __log_start_commit(journal_t *journal, tid_t target)
return 0;
}
-int log_start_commit(journal_t *journal, tid_t tid)
+int jbd2_log_start_commit(journal_t *journal, tid_t tid)
{
int ret;
spin_lock(&journal->j_state_lock);
- ret = __log_start_commit(journal, tid);
+ ret = __jbd2_log_start_commit(journal, tid);
spin_unlock(&journal->j_state_lock);
return ret;
}
@@ -465,7 +465,7 @@ int log_start_commit(journal_t *journal, tid_t tid)
*
* Returns true if a transaction was started.
*/
-int journal_force_commit_nested(journal_t *journal)
+int jbd2_journal_force_commit_nested(journal_t *journal)
{
transaction_t *transaction = NULL;
tid_t tid;
@@ -473,7 +473,7 @@ int journal_force_commit_nested(journal_t *journal)
spin_lock(&journal->j_state_lock);
if (journal->j_running_transaction && !current->journal_info) {
transaction = journal->j_running_transaction;
- __log_start_commit(journal, transaction->t_tid);
+ __jbd2_log_start_commit(journal, transaction->t_tid);
} else if (journal->j_committing_transaction)
transaction = journal->j_committing_transaction;
@@ -484,7 +484,7 @@ int journal_force_commit_nested(journal_t *journal)
tid = transaction->t_tid;
spin_unlock(&journal->j_state_lock);
- log_wait_commit(journal, tid);
+ jbd2_log_wait_commit(journal, tid);
return 1;
}
@@ -492,7 +492,7 @@ int journal_force_commit_nested(journal_t *journal)
* Start a commit of the current running transaction (if any). Returns true
* if a transaction was started, and fills its tid in at *ptid
*/
-int journal_start_commit(journal_t *journal, tid_t *ptid)
+int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
{
int ret = 0;
@@ -500,7 +500,7 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
- ret = __log_start_commit(journal, tid);
+ ret = __jbd2_log_start_commit(journal, tid);
if (ret && ptid)
*ptid = tid;
} else if (journal->j_committing_transaction && ptid) {
@@ -519,7 +519,7 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
* Wait for a specified commit to complete.
* The caller may not hold the journal lock.
*/
-int log_wait_commit(journal_t *journal, tid_t tid)
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
@@ -555,7 +555,7 @@ int log_wait_commit(journal_t *journal, tid_t tid)
* Log buffer allocation routines:
*/
-int journal_next_log_block(journal_t *journal, unsigned long *retp)
+int jbd2_journal_next_log_block(journal_t *journal, unsigned long *retp)
{
unsigned long blocknr;
@@ -568,7 +568,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
if (journal->j_head == journal->j_last)
journal->j_head = journal->j_first;
spin_unlock(&journal->j_state_lock);
- return journal_bmap(journal, blocknr, retp);
+ return jbd2_journal_bmap(journal, blocknr, retp);
}
/*
@@ -578,7 +578,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
* this is a no-op. If needed, we can use j_blk_offset - everything is
* ready.
*/
-int journal_bmap(journal_t *journal, unsigned long blocknr,
+int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
unsigned long *retp)
{
int err = 0;
@@ -610,18 +610,18 @@ int journal_bmap(journal_t *journal, unsigned long blocknr,
* the journal without copying their contents, but for journal
* descriptor blocks we do need to generate bona fide buffers.
*
- * After the caller of journal_get_descriptor_buffer() has finished modifying
+ * After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
* the buffer's contents they really should run flush_dcache_page(bh->b_page).
* But we don't bother doing that, so there will be coherency problems with
* mmaps of blockdevs which hold live JBD-controlled filesystems.
*/
-struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
+struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
{
struct buffer_head *bh;
unsigned long blocknr;
int err;
- err = journal_next_log_block(journal, &blocknr);
+ err = jbd2_journal_next_log_block(journal, &blocknr);
if (err)
return NULL;
@@ -632,7 +632,7 @@ struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
set_buffer_uptodate(bh);
unlock_buffer(bh);
BUFFER_TRACE(bh, "return this buffer");
- return journal_add_journal_head(bh);
+ return jbd2_journal_add_journal_head(bh);
}
/*
@@ -669,10 +669,10 @@ static journal_t * journal_init_common (void)
journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
/* The journal is marked for error until we succeed with recovery! */
- journal->j_flags = JFS_ABORT;
+ journal->j_flags = JBD2_ABORT;
/* Set up a default-sized revoke table for the new mount. */
- err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
+ err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
if (err) {
kfree(journal);
goto fail;
@@ -682,7 +682,7 @@ fail:
return NULL;
}
-/* journal_init_dev and journal_init_inode:
+/* jbd2_journal_init_dev and jbd2_journal_init_inode:
*
* Create a journal structure assigned some fixed set of disk blocks to
* the journal. We don't actually touch those disk blocks yet, but we
@@ -692,7 +692,7 @@ fail:
*/
/**
- * journal_t * journal_init_dev() - creates an initialises a journal structure
+ * journal_t * jbd2_journal_init_dev() - creates an initialises a journal structure
* @bdev: Block device on which to create the journal
* @fs_dev: Device which hold journalled filesystem for this journal.
* @start: Block nr Start of journal.
@@ -700,11 +700,11 @@ fail:
* @blocksize: blocksize of journalling device
* @returns: a newly created journal_t *
*
- * journal_init_dev creates a journal which maps a fixed contiguous
+ * jbd2_journal_init_dev creates a journal which maps a fixed contiguous
* range of blocks on an arbitrary block device.
*
*/
-journal_t * journal_init_dev(struct block_device *bdev,
+journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
int start, int len, int blocksize)
{
@@ -740,14 +740,14 @@ journal_t * journal_init_dev(struct block_device *bdev,
}
/**
- * journal_t * journal_init_inode () - creates a journal which maps to a inode.
+ * journal_t * jbd2_journal_init_inode () - creates a journal which maps to a inode.
* @inode: An inode to create the journal in
*
- * journal_init_inode creates a journal which maps an on-disk inode as
+ * jbd2_journal_init_inode creates a journal which maps an on-disk inode as
* the journal. The inode must exist already, must support bmap() and
* must have all data blocks preallocated.
*/
-journal_t * journal_init_inode (struct inode *inode)
+journal_t * jbd2_journal_init_inode (struct inode *inode)
{
struct buffer_head *bh;
journal_t *journal = journal_init_common();
@@ -780,7 +780,7 @@ journal_t * journal_init_inode (struct inode *inode)
return NULL;
}
- err = journal_bmap(journal, 0, &blocknr);
+ err = jbd2_journal_bmap(journal, 0, &blocknr);
/* If that failed, give up */
if (err) {
printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
@@ -838,27 +838,27 @@ static int journal_reset(journal_t *journal)
journal->j_max_transaction_buffers = journal->j_maxlen / 4;
/* Add the dynamic fields and write it to disk. */
- journal_update_superblock(journal, 1);
- journal_start_thread(journal);
+ jbd2_journal_update_superblock(journal, 1);
+ jbd2_journal_start_thread(journal);
return 0;
}
/**
- * int journal_create() - Initialise the new journal file
+ * int jbd2_journal_create() - Initialise the new journal file
* @journal: Journal to create. This structure must have been initialised
*
* Given a journal_t structure which tells us which disk blocks we can
* use, create a new journal superblock and initialise all of the
* journal fields from scratch.
**/
-int journal_create(journal_t *journal)
+int jbd2_journal_create(journal_t *journal)
{
unsigned long blocknr;
struct buffer_head *bh;
journal_superblock_t *sb;
int i, err;
- if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
+ if (journal->j_maxlen < JBD2_MIN_JOURNAL_BLOCKS) {
printk (KERN_ERR "Journal length (%d blocks) too short.\n",
journal->j_maxlen);
journal_fail_superblock(journal);
@@ -876,10 +876,10 @@ int journal_create(journal_t *journal)
}
/* Zero out the entire journal on disk. We cannot afford to
- have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
+ have any blocks on disk beginning with JBD2_MAGIC_NUMBER. */
jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
for (i = 0; i < journal->j_maxlen; i++) {
- err = journal_bmap(journal, i, &blocknr);
+ err = jbd2_journal_bmap(journal, i, &blocknr);
if (err)
return err;
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
@@ -899,8 +899,8 @@ int journal_create(journal_t *journal)
/* OK, fill in the initial static fields in the new superblock */
sb = journal->j_superblock;
- sb->s_header.h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
- sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
+ sb->s_header.h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+ sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
sb->s_blocksize = cpu_to_be32(journal->j_blocksize);
sb->s_maxlen = cpu_to_be32(journal->j_maxlen);
@@ -908,21 +908,21 @@ int journal_create(journal_t *journal)
journal->j_transaction_sequence = 1;
- journal->j_flags &= ~JFS_ABORT;
+ journal->j_flags &= ~JBD2_ABORT;
journal->j_format_version = 2;
return journal_reset(journal);
}
/**
- * void journal_update_superblock() - Update journal sb on disk.
+ * void jbd2_journal_update_superblock() - Update journal sb on disk.
* @journal: The journal to update.
* @wait: Set to '0' if you don't want to wait for IO completion.
*
* Update a journal's dynamic superblock fields and write it to disk,
* optionally waiting for the IO to complete.
*/
-void journal_update_superblock(journal_t *journal, int wait)
+void jbd2_journal_update_superblock(journal_t *journal, int wait)
{
journal_superblock_t *sb = journal->j_superblock;
struct buffer_head *bh = journal->j_sb_buffer;
@@ -931,7 +931,7 @@ void journal_update_superblock(journal_t *journal, int wait)
* As a special case, if the on-disk copy is already marked as needing
* no recovery (s_start == 0) and there are no outstanding transactions
* in the filesystem, then we can safely defer the superblock update
- * until the next commit by setting JFS_FLUSHED. This avoids
+ * until the next commit by setting JBD2_FLUSHED. This avoids
* attempting a write to a potential-readonly device.
*/
if (sb->s_start == 0 && journal->j_tail_sequence ==
@@ -966,9 +966,9 @@ out:
spin_lock(&journal->j_state_lock);
if (sb->s_start)
- journal->j_flags &= ~JFS_FLUSHED;
+ journal->j_flags &= ~JBD2_FLUSHED;
else
- journal->j_flags |= JFS_FLUSHED;
+ journal->j_flags |= JBD2_FLUSHED;
spin_unlock(&journal->j_state_lock);
}
@@ -1000,17 +1000,17 @@ static int journal_get_superblock(journal_t *journal)
err = -EINVAL;
- if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
+ if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
printk(KERN_WARNING "JBD: no valid journal superblock found\n");
goto out;
}
switch(be32_to_cpu(sb->s_header.h_blocktype)) {
- case JFS_SUPERBLOCK_V1:
+ case JBD2_SUPERBLOCK_V1:
journal->j_format_version = 1;
break;
- case JFS_SUPERBLOCK_V2:
+ case JBD2_SUPERBLOCK_V2:
journal->j_format_version = 2;
break;
default:
@@ -1059,14 +1059,14 @@ static int load_superblock(journal_t *journal)
/**
- * int journal_load() - Read journal from disk.
+ * int jbd2_journal_load() - Read journal from disk.
* @journal: Journal to act on.
*
* Given a journal_t structure which tells us which disk blocks contain
* a journal, read the journal from disk to initialise the in-memory
* structures.
*/
-int journal_load(journal_t *journal)
+int jbd2_journal_load(journal_t *journal)
{
int err;
journal_superblock_t *sb;
@@ -1081,9 +1081,9 @@ int journal_load(journal_t *journal)
if (journal->j_format_version >= 2) {
if ((sb->s_feature_ro_compat &
- ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
+ ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
(sb->s_feature_incompat &
- ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
+ ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
printk (KERN_WARNING
"JBD: Unrecognised features on journal\n");
return -EINVAL;
@@ -1093,13 +1093,13 @@ int journal_load(journal_t *journal)
/*
* Create a slab for this blocksize
*/
- err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
+ err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
if (err)
return err;
/* Let the recovery code check whether it needs to recover any
* data from the journal. */
- if (journal_recover(journal))
+ if (jbd2_journal_recover(journal))
goto recovery_error;
/* OK, we've finished with the dynamic journal bits:
@@ -1108,8 +1108,8 @@ int journal_load(journal_t *journal)
if (journal_reset(journal))
goto recovery_error;
- journal->j_flags &= ~JFS_ABORT;
- journal->j_flags |= JFS_LOADED;
+ journal->j_flags &= ~JBD2_ABORT;
+ journal->j_flags |= JBD2_LOADED;
return 0;
recovery_error:
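
For an inode-backed journal, the renamed setup path runs jbd2_journal_init_inode() followed by jbd2_journal_load(), and teardown goes through jbd2_journal_destroy(). A rough lifecycle sketch (illustrative; the helper is hypothetical and error handling is abbreviated):

/* Hypothetical filesystem-side journal bring-up using the functions above. */
static journal_t *fs_load_journal(struct inode *journal_inode)
{
	journal_t *journal;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal)
		return NULL;

	/* Replays the log if necessary, sets up the per-blocksize slab and,
	 * on success, clears JBD2_ABORT and sets JBD2_LOADED. */
	if (jbd2_journal_load(journal)) {
		jbd2_journal_destroy(journal);
		return NULL;
	}
	return journal;
}
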
@@ -1118,20 +1118,20 @@ recovery_error:
}
/**
- * void journal_destroy() - Release a journal_t structure.
+ * void jbd2_journal_destroy() - Release a journal_t structure.
* @journal: Journal to act on.
*
* Release a journal_t structure once it is no longer in use by the
* journaled object.
*/
-void journal_destroy(journal_t *journal)
+void jbd2_journal_destroy(journal_t *journal)
{
/* Wait for the commit thread to wake up and die. */
journal_kill_thread(journal);
/* Force a final log commit */
if (journal->j_running_transaction)
- journal_commit_transaction(journal);
+ jbd2_journal_commit_transaction(journal);
/* Force any old transactions to disk */
@@ -1139,7 +1139,7 @@ void journal_destroy(journal_t *journal)
spin_lock(&journal->j_list_lock);
while (journal->j_checkpoint_transactions != NULL) {
spin_unlock(&journal->j_list_lock);
- log_do_checkpoint(journal);
+ jbd2_log_do_checkpoint(journal);
spin_lock(&journal->j_list_lock);
}
@@ -1152,21 +1152,21 @@ void journal_destroy(journal_t *journal)
journal->j_tail = 0;
journal->j_tail_sequence = ++journal->j_transaction_sequence;
if (journal->j_sb_buffer) {
- journal_update_superblock(journal, 1);
+ jbd2_journal_update_superblock(journal, 1);
brelse(journal->j_sb_buffer);
}
if (journal->j_inode)
iput(journal->j_inode);
if (journal->j_revoke)
- journal_destroy_revoke(journal);
+ jbd2_journal_destroy_revoke(journal);
kfree(journal->j_wbuf);
kfree(journal);
}
/**
- *int journal_check_used_features () - Check if features specified are used.
+ *int jbd2_journal_check_used_features () - Check if features specified are used.
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
@@ -1176,7 +1176,7 @@ void journal_destroy(journal_t *journal)
* features. Return true (non-zero) if it does.
**/
-int journal_check_used_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
journal_superblock_t *sb;
@@ -1197,7 +1197,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
}
/**
- * int journal_check_available_features() - Check feature set in journalling layer
+ * int jbd2_journal_check_available_features() - Check feature set in journalling layer
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
@@ -1207,7 +1207,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
* all of a given set of features on this journal. Return true
* (non-zero) if it can. */
-int journal_check_available_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
journal_superblock_t *sb;
@@ -1224,16 +1224,16 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
if (journal->j_format_version != 2)
return 0;
- if ((compat & JFS_KNOWN_COMPAT_FEATURES) == compat &&
- (ro & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
- (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
+ if ((compat & JBD2_KNOWN_COMPAT_FEATURES) == compat &&
+ (ro & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro &&
+ (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat)
return 1;
return 0;
}
/**
- * int journal_set_features () - Mark a given journal feature in the superblock
+ * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
* @journal: Journal to act on.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
@@ -1244,15 +1244,15 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
*
*/
-int journal_set_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
journal_superblock_t *sb;
- if (journal_check_used_features(journal, compat, ro, incompat))
+ if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
return 1;
- if (!journal_check_available_features(journal, compat, ro, incompat))
+ if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
return 0;
jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
@@ -1269,13 +1269,13 @@ int journal_set_features (journal_t *journal, unsigned long compat,
/**
- * int journal_update_format () - Update on-disk journal structure.
+ * int jbd2_journal_update_format () - Update on-disk journal structure.
* @journal: Journal to act on.
*
* Given an initialised but unloaded journal struct, poke about in the
* on-disk structure to update it to the most recent supported version.
*/
-int journal_update_format (journal_t *journal)
+int jbd2_journal_update_format (journal_t *journal)
{
journal_superblock_t *sb;
int err;
@@ -1287,9 +1287,9 @@ int journal_update_format (journal_t *journal)
sb = journal->j_superblock;
switch (be32_to_cpu(sb->s_header.h_blocktype)) {
- case JFS_SUPERBLOCK_V2:
+ case JBD2_SUPERBLOCK_V2:
return 0;
- case JFS_SUPERBLOCK_V1:
+ case JBD2_SUPERBLOCK_V1:
return journal_convert_superblock_v1(journal, sb);
default:
break;
@@ -1312,7 +1312,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
memset(&sb->s_feature_compat, 0, blocksize-offset);
sb->s_nr_users = cpu_to_be32(1);
- sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
+ sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
journal->j_format_version = 2;
bh = journal->j_sb_buffer;
@@ -1324,7 +1324,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
/**
- * int journal_flush () - Flush journal
+ * int jbd2_journal_flush () - Flush journal
* @journal: Journal to act on.
*
* Flush all data for a given journal to disk and empty the journal.
@@ -1332,7 +1332,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
* recovery does not need to happen on remount.
*/
-int journal_flush(journal_t *journal)
+int jbd2_journal_flush(journal_t *journal)
{
int err = 0;
transaction_t *transaction = NULL;
@@ -1343,7 +1343,7 @@ int journal_flush(journal_t *journal)
/* Force everything buffered to the log... */
if (journal->j_running_transaction) {
transaction = journal->j_running_transaction;
- __log_start_commit(journal, transaction->t_tid);
+ __jbd2_log_start_commit(journal, transaction->t_tid);
} else if (journal->j_committing_transaction)
transaction = journal->j_committing_transaction;
@@ -1352,7 +1352,7 @@ int journal_flush(journal_t *journal)
tid_t tid = transaction->t_tid;
spin_unlock(&journal->j_state_lock);
- log_wait_commit(journal, tid);
+ jbd2_log_wait_commit(journal, tid);
} else {
spin_unlock(&journal->j_state_lock);
}
@@ -1361,11 +1361,11 @@ int journal_flush(journal_t *journal)
spin_lock(&journal->j_list_lock);
while (!err && journal->j_checkpoint_transactions != NULL) {
spin_unlock(&journal->j_list_lock);
- err = log_do_checkpoint(journal);
+ err = jbd2_log_do_checkpoint(journal);
spin_lock(&journal->j_list_lock);
}
spin_unlock(&journal->j_list_lock);
- cleanup_journal_tail(journal);
+ jbd2_cleanup_journal_tail(journal);
/* Finally, mark the journal as really needing no recovery.
* This sets s_start==0 in the underlying superblock, which is
@@ -1376,7 +1376,7 @@ int journal_flush(journal_t *journal)
old_tail = journal->j_tail;
journal->j_tail = 0;
spin_unlock(&journal->j_state_lock);
- journal_update_superblock(journal, 1);
+ jbd2_journal_update_superblock(journal, 1);
spin_lock(&journal->j_state_lock);
journal->j_tail = old_tail;
@@ -1390,24 +1390,24 @@ int journal_flush(journal_t *journal)
}
/**
- * int journal_wipe() - Wipe journal contents
+ * int jbd2_journal_wipe() - Wipe journal contents
* @journal: Journal to act on.
* @write: flag (see below)
*
* Wipe out all of the contents of a journal, safely. This will produce
* a warning if the journal contains any valid recovery information.
- * Must be called between journal_init_*() and journal_load().
+ * Must be called between journal_init_*() and jbd2_journal_load().
*
* If 'write' is non-zero, then we wipe out the journal on disk; otherwise
* we merely suppress recovery.
*/
-int journal_wipe(journal_t *journal, int write)
+int jbd2_journal_wipe(journal_t *journal, int write)
{
journal_superblock_t *sb;
int err = 0;
- J_ASSERT (!(journal->j_flags & JFS_LOADED));
+ J_ASSERT (!(journal->j_flags & JBD2_LOADED));
err = load_superblock(journal);
if (err)
@@ -1421,9 +1421,9 @@ int journal_wipe(journal_t *journal, int write)
printk (KERN_WARNING "JBD: %s recovery information on journal\n",
write ? "Clearing" : "Ignoring");
- err = journal_skip_recovery(journal);
+ err = jbd2_journal_skip_recovery(journal);
if (write)
- journal_update_superblock(journal, 1);
+ jbd2_journal_update_superblock(journal, 1);
no_recovery:
return err;
@@ -1459,22 +1459,22 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
* Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
* and don't attempt to make any other journal updates.
*/
-void __journal_abort_hard(journal_t *journal)
+void __jbd2_journal_abort_hard(journal_t *journal)
{
transaction_t *transaction;
char b[BDEVNAME_SIZE];
- if (journal->j_flags & JFS_ABORT)
+ if (journal->j_flags & JBD2_ABORT)
return;
printk(KERN_ERR "Aborting journal on device %s.\n",
journal_dev_name(journal, b));
spin_lock(&journal->j_state_lock);
- journal->j_flags |= JFS_ABORT;
+ journal->j_flags |= JBD2_ABORT;
transaction = journal->j_running_transaction;
if (transaction)
- __log_start_commit(journal, transaction->t_tid);
+ __jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
}
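
Together with jbd2_journal_abort(), jbd2_journal_errno() and jbd2_journal_ack_err() below, the hard-abort path above forms the remount-readonly error protocol that ext2/ext3-style clients drive. A rough sketch of that client side (illustrative; the helper is hypothetical):

/* Hypothetical client-side error handling built on the calls below. */
static void fs_handle_journal_error(journal_t *journal, int errno)
{
	/* Record errno and shut the journal down: new handles now fail with
	 * -EROFS and a pending jbd2_journal_stop() returns -EIO. */
	jbd2_journal_abort(journal, errno);

	/* While aborted on this mount jbd2_journal_errno() reports -EROFS;
	 * otherwise it returns the errno recorded by the last abort. */
	if (jbd2_journal_errno(journal))
		jbd2_journal_ack_err(journal);	/* the error must be cleared or
						 * acked to leave read-only mode */
}
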
@@ -1482,20 +1482,20 @@ void __journal_abort_hard(journal_t *journal)
* but don't do any other IO. */
static void __journal_abort_soft (journal_t *journal, int errno)
{
- if (journal->j_flags & JFS_ABORT)
+ if (journal->j_flags & JBD2_ABORT)
return;
if (!journal->j_errno)
journal->j_errno = errno;
- __journal_abort_hard(journal);
+ __jbd2_journal_abort_hard(journal);
if (errno)
- journal_update_superblock(journal, 1);
+ jbd2_journal_update_superblock(journal, 1);
}
/**
- * void journal_abort () - Shutdown the journal immediately.
+ * void jbd2_journal_abort () - Shutdown the journal immediately.
* @journal: the journal to shutdown.
* @errno: an error number to record in the journal indicating
* the reason for the shutdown.
@@ -1504,7 +1504,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* journal (not of a single transaction). This operation cannot be
* undone without closing and reopening the journal.
*
- * The journal_abort function is intended to support higher level error
+ * The jbd2_journal_abort function is intended to support higher level error
* recovery mechanisms such as the ext2/ext3 remount-readonly error
* mode.
*
@@ -1520,13 +1520,13 @@ static void __journal_abort_soft (journal_t *journal, int errno)
*
* Any attempt to get a new transaction handle on a journal which is in
* ABORT state will just result in an -EROFS error return. A
- * journal_stop on an existing handle will return -EIO if we have
+ * jbd2_journal_stop on an existing handle will return -EIO if we have
* entered abort state during the update.
*
* Recursive transactions are not disturbed by journal abort until the
- * final journal_stop, which will receive the -EIO error.
+ * final jbd2_journal_stop, which will receive the -EIO error.
*
- * Finally, the journal_abort call allows the caller to supply an errno
+ * Finally, the jbd2_journal_abort call allows the caller to supply an errno
* which will be recorded (if possible) in the journal superblock. This
* allows a client to record failure conditions in the middle of a
* transaction without having to complete the transaction to record the
@@ -1540,28 +1540,28 @@ static void __journal_abort_soft (journal_t *journal, int errno)
*
*/
-void journal_abort(journal_t *journal, int errno)
+void jbd2_journal_abort(journal_t *journal, int errno)
{
__journal_abort_soft(journal, errno);
}
/**
- * int journal_errno () - returns the journal's error state.
+ * int jbd2_journal_errno () - returns the journal's error state.
* @journal: journal to examine.
*
- * This is the errno numbet set with journal_abort(), the last
+ * This is the errno numbet set with jbd2_journal_abort(), the last
* time the journal was mounted - if the journal was stopped
* without calling abort this will be 0.
*
* If the journal has been aborted on this mount time -EROFS will
* be returned.
*/
-int journal_errno(journal_t *journal)
+int jbd2_journal_errno(journal_t *journal)
{
int err;
spin_lock(&journal->j_state_lock);
- if (journal->j_flags & JFS_ABORT)
+ if (journal->j_flags & JBD2_ABORT)
err = -EROFS;
else
err = journal->j_errno;
@@ -1570,18 +1570,18 @@ int journal_errno(journal_t *journal)
}
/**
- * int journal_clear_err () - clears the journal's error state
+ * int jbd2_journal_clear_err () - clears the journal's error state
* @journal: journal to act on.
*
* An error must be cleared or Acked to take a FS out of readonly
* mode.
*/
-int journal_clear_err(journal_t *journal)
+int jbd2_journal_clear_err(journal_t *journal)
{
int err = 0;
spin_lock(&journal->j_state_lock);
- if (journal->j_flags & JFS_ABORT)
+ if (journal->j_flags & JBD2_ABORT)
err = -EROFS;
else
journal->j_errno = 0;
@@ -1590,21 +1590,21 @@ int journal_clear_err(journal_t *journal)
}
/**
- * void journal_ack_err() - Ack journal err.
+ * void jbd2_journal_ack_err() - Ack journal err.
* @journal: journal to act on.
*
* An error must be cleared or Acked to take a FS out of readonly
* mode.
*/
-void journal_ack_err(journal_t *journal)
+void jbd2_journal_ack_err(journal_t *journal)
{
spin_lock(&journal->j_state_lock);
if (journal->j_errno)
- journal->j_flags |= JFS_ACK_ERR;
+ journal->j_flags |= JBD2_ACK_ERR;
spin_unlock(&journal->j_state_lock);
}
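
Taken together, jbd2_journal_errno(), jbd2_journal_clear_err() and jbd2_journal_ack_err() form the small protocol described above: an error recorded at abort time must be cleared or acknowledged before the filesystem leaves read-only mode. Below is a minimal sketch of such a check; the helper name is an assumption, not part of this patch.

/* Hypothetical remount-read-write helper built on the three calls above. */
static int example_clear_journal_error(journal_t *journal)
{
	int err = jbd2_journal_errno(journal);

	if (err) {
		/* Clear and acknowledge the recorded error before leaving RO mode. */
		jbd2_journal_clear_err(journal);
		jbd2_journal_ack_err(journal);
	}
	return err;
}
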
-int journal_blocks_per_page(struct inode *inode)
+int jbd2_journal_blocks_per_page(struct inode *inode)
{
return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
}
@@ -1613,7 +1613,7 @@ int journal_blocks_per_page(struct inode *inode)
* Simple support for retrying memory allocations. Introduced to help to
* debug different VM deadlock avoidance strategies.
*/
-void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
+void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
{
return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
}
@@ -1634,7 +1634,7 @@ static const char *jbd_slab_names[JBD_MAX_SLABS] = {
"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
};
-static void journal_destroy_jbd_slabs(void)
+static void jbd2_journal_destroy_jbd_slabs(void)
{
int i;
@@ -1645,7 +1645,7 @@ static void journal_destroy_jbd_slabs(void)
}
}
-static int journal_create_jbd_slab(size_t slab_size)
+static int jbd2_journal_create_jbd_slab(size_t slab_size)
{
int i = JBD_SLAB_INDEX(slab_size);
@@ -1671,7 +1671,7 @@ static int journal_create_jbd_slab(size_t slab_size)
return 0;
}
-void * jbd_slab_alloc(size_t size, gfp_t flags)
+void * jbd2_slab_alloc(size_t size, gfp_t flags)
{
int idx;
@@ -1680,7 +1680,7 @@ void * jbd_slab_alloc(size_t size, gfp_t flags)
return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
}
-void jbd_slab_free(void *ptr, size_t size)
+void jbd2_slab_free(void *ptr, size_t size)
{
int idx;
@@ -1692,35 +1692,35 @@ void jbd_slab_free(void *ptr, size_t size)
/*
* Journal_head storage management
*/
-static kmem_cache_t *journal_head_cache;
+static kmem_cache_t *jbd2_journal_head_cache;
#ifdef CONFIG_JBD_DEBUG
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
-static int journal_init_journal_head_cache(void)
+static int journal_init_jbd2_journal_head_cache(void)
{
int retval;
- J_ASSERT(journal_head_cache == 0);
- journal_head_cache = kmem_cache_create("journal_head",
+ J_ASSERT(jbd2_journal_head_cache == 0);
+ jbd2_journal_head_cache = kmem_cache_create("journal_head",
sizeof(struct journal_head),
0, /* offset */
0, /* flags */
NULL, /* ctor */
NULL); /* dtor */
retval = 0;
- if (journal_head_cache == 0) {
+ if (jbd2_journal_head_cache == 0) {
retval = -ENOMEM;
printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
}
return retval;
}
-static void journal_destroy_journal_head_cache(void)
+static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
{
- J_ASSERT(journal_head_cache != NULL);
- kmem_cache_destroy(journal_head_cache);
- journal_head_cache = NULL;
+ J_ASSERT(jbd2_journal_head_cache != NULL);
+ kmem_cache_destroy(jbd2_journal_head_cache);
+ jbd2_journal_head_cache = NULL;
}
/*
@@ -1734,7 +1734,7 @@ static struct journal_head *journal_alloc_journal_head(void)
#ifdef CONFIG_JBD_DEBUG
atomic_inc(&nr_journal_heads);
#endif
- ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
+ ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
if (ret == 0) {
jbd_debug(1, "out of memory for journal_head\n");
if (time_after(jiffies, last_warning + 5*HZ)) {
@@ -1744,7 +1744,7 @@ static struct journal_head *journal_alloc_journal_head(void)
}
while (ret == 0) {
yield();
- ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
+ ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
}
}
return ret;
@@ -1756,7 +1756,7 @@ static void journal_free_journal_head(struct journal_head *jh)
atomic_dec(&nr_journal_heads);
memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
- kmem_cache_free(journal_head_cache, jh);
+ kmem_cache_free(jbd2_journal_head_cache, jh);
}
/*
@@ -1775,22 +1775,22 @@ static void journal_free_journal_head(struct journal_head *jh)
*
* A journal_head may be detached from its buffer_head when the journal_head's
* b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
- * Various places in JBD call journal_remove_journal_head() to indicate that the
+ * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the
* journal_head can be dropped if needed.
*
* Various places in the kernel want to attach a journal_head to a buffer_head
* _before_ attaching the journal_head to a transaction. To protect the
- * journal_head in this situation, journal_add_journal_head elevates the
+ * journal_head in this situation, jbd2_journal_add_journal_head elevates the
* journal_head's b_jcount refcount by one. The caller must call
- * journal_put_journal_head() to undo this.
+ * jbd2_journal_put_journal_head() to undo this.
*
* So the typical usage would be:
*
* (Attach a journal_head if needed. Increments b_jcount)
- * struct journal_head *jh = journal_add_journal_head(bh);
+ * struct journal_head *jh = jbd2_journal_add_journal_head(bh);
* ...
* jh->b_transaction = xxx;
- * journal_put_journal_head(jh);
+ * jbd2_journal_put_journal_head(jh);
*
* Now, the journal_head's b_jcount is zero, but it is safe from being released
* because it has a non-zero b_transaction.
@@ -1802,7 +1802,7 @@ static void journal_free_journal_head(struct journal_head *jh)
* Doesn't need the journal lock.
* May sleep.
*/
-struct journal_head *journal_add_journal_head(struct buffer_head *bh)
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
{
struct journal_head *jh;
struct journal_head *new_jh = NULL;
@@ -1845,7 +1845,7 @@ repeat:
* Grab a ref against this buffer_head's journal_head. If it ended up not
* having a journal_head, return NULL
*/
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
{
struct journal_head *jh = NULL;
@@ -1877,13 +1877,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
printk(KERN_WARNING "%s: freeing "
"b_frozen_data\n",
__FUNCTION__);
- jbd_slab_free(jh->b_frozen_data, bh->b_size);
+ jbd2_slab_free(jh->b_frozen_data, bh->b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing "
"b_committed_data\n",
__FUNCTION__);
- jbd_slab_free(jh->b_committed_data, bh->b_size);
+ jbd2_slab_free(jh->b_committed_data, bh->b_size);
}
bh->b_private = NULL;
jh->b_bh = NULL; /* debug, really */
@@ -1897,7 +1897,7 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
}
/*
- * journal_remove_journal_head(): if the buffer isn't attached to a transaction
+ * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
* and has a zero b_jcount then remove and release its journal_head. If we did
* see that the buffer is not used by any transaction we also "logically"
* decrement ->b_count.
@@ -1905,11 +1905,11 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
* We in fact take an additional increment on ->b_count as a convenience,
* because the caller usually wants to do additional things with the bh
* after calling here.
- * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some
+ * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
* time. Once the caller has run __brelse(), the buffer is eligible for
* reaping by try_to_free_buffers().
*/
-void journal_remove_journal_head(struct buffer_head *bh)
+void jbd2_journal_remove_journal_head(struct buffer_head *bh)
{
jbd_lock_bh_journal_head(bh);
__journal_remove_journal_head(bh);
@@ -1920,7 +1920,7 @@ void journal_remove_journal_head(struct buffer_head *bh)
* Drop a reference on the passed journal_head. If it fell to zero then try to
* release the journal_head from the buffer_head.
*/
-void journal_put_journal_head(struct journal_head *jh)
+void jbd2_journal_put_journal_head(struct journal_head *jh)
{
struct buffer_head *bh = jh2bh(jh);
@@ -1938,8 +1938,8 @@ void journal_put_journal_head(struct journal_head *jh)
* /proc tunables
*/
#if defined(CONFIG_JBD_DEBUG)
-int journal_enable_debug;
-EXPORT_SYMBOL(journal_enable_debug);
+int jbd2_journal_enable_debug;
+EXPORT_SYMBOL(jbd2_journal_enable_debug);
#endif
#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
@@ -1951,7 +1951,7 @@ static int read_jbd_debug(char *page, char **start, off_t off,
{
int ret;
- ret = sprintf(page + off, "%d\n", journal_enable_debug);
+ ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
*eof = 1;
return ret;
}
@@ -1966,11 +1966,11 @@ static int write_jbd_debug(struct file *file, const char __user *buffer,
if (copy_from_user(buf, buffer, count))
return -EFAULT;
buf[ARRAY_SIZE(buf) - 1] = '\0';
- journal_enable_debug = simple_strtoul(buf, NULL, 10);
+ jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
return count;
}
-#define JBD_PROC_NAME "sys/fs/jbd-debug"
+#define JBD_PROC_NAME "sys/fs/jbd2-debug"
static void __init create_jbd_proc_entry(void)
{
@@ -1982,7 +1982,7 @@ static void __init create_jbd_proc_entry(void)
}
}
-static void __exit remove_jbd_proc_entry(void)
+static void __exit jbd2_remove_jbd_proc_entry(void)
{
if (proc_jbd_debug)
remove_proc_entry(JBD_PROC_NAME, NULL);
@@ -1991,31 +1991,31 @@ static void __exit remove_jbd_proc_entry(void)
#else
#define create_jbd_proc_entry() do {} while (0)
-#define remove_jbd_proc_entry() do {} while (0)
+#define jbd2_remove_jbd_proc_entry() do {} while (0)
#endif
-kmem_cache_t *jbd_handle_cache;
+kmem_cache_t *jbd2_handle_cache;
static int __init journal_init_handle_cache(void)
{
- jbd_handle_cache = kmem_cache_create("journal_handle",
+ jbd2_handle_cache = kmem_cache_create("journal_handle",
sizeof(handle_t),
0, /* offset */
0, /* flags */
NULL, /* ctor */
NULL); /* dtor */
- if (jbd_handle_cache == NULL) {
+ if (jbd2_handle_cache == NULL) {
printk(KERN_EMERG "JBD: failed to create handle cache\n");
return -ENOMEM;
}
return 0;
}
-static void journal_destroy_handle_cache(void)
+static void jbd2_journal_destroy_handle_cache(void)
{
- if (jbd_handle_cache)
- kmem_cache_destroy(jbd_handle_cache);
+ if (jbd2_handle_cache)
+ kmem_cache_destroy(jbd2_handle_cache);
}
/*
@@ -2026,20 +2026,20 @@ static int __init journal_init_caches(void)
{
int ret;
- ret = journal_init_revoke_caches();
+ ret = jbd2_journal_init_revoke_caches();
if (ret == 0)
- ret = journal_init_journal_head_cache();
+ ret = journal_init_jbd2_journal_head_cache();
if (ret == 0)
ret = journal_init_handle_cache();
return ret;
}
-static void journal_destroy_caches(void)
+static void jbd2_journal_destroy_caches(void)
{
- journal_destroy_revoke_caches();
- journal_destroy_journal_head_cache();
- journal_destroy_handle_cache();
- journal_destroy_jbd_slabs();
+ jbd2_journal_destroy_revoke_caches();
+ jbd2_journal_destroy_jbd2_journal_head_cache();
+ jbd2_journal_destroy_handle_cache();
+ jbd2_journal_destroy_jbd_slabs();
}
static int __init journal_init(void)
@@ -2050,7 +2050,7 @@ static int __init journal_init(void)
ret = journal_init_caches();
if (ret != 0)
- journal_destroy_caches();
+ jbd2_journal_destroy_caches();
create_jbd_proc_entry();
return ret;
}
@@ -2062,8 +2062,8 @@ static void __exit journal_exit(void)
if (n)
printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
#endif
- remove_jbd_proc_entry();
- journal_destroy_caches();
+ jbd2_remove_jbd_proc_entry();
+ jbd2_journal_destroy_caches();
}
MODULE_LICENSE("GPL");
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 11563fe2a52..b2012d11243 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -18,7 +18,7 @@
#else
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#endif
@@ -86,7 +86,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
nbufs = 0;
for (next = start; next < max; next++) {
- err = journal_bmap(journal, next, &blocknr);
+ err = jbd2_journal_bmap(journal, next, &blocknr);
if (err) {
printk (KERN_ERR "JBD: bad block at offset %u\n",
@@ -142,7 +142,7 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
return -EIO;
}
- err = journal_bmap(journal, offset, &blocknr);
+ err = jbd2_journal_bmap(journal, offset, &blocknr);
if (err) {
printk (KERN_ERR "JBD: bad block at offset %u\n",
@@ -191,10 +191,10 @@ static int count_tags(struct buffer_head *bh, int size)
nr++;
tagp += sizeof(journal_block_tag_t);
- if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
+ if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
tagp += 16;
- if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
+ if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
break;
}
@@ -210,7 +210,7 @@ do { \
} while (0)
/**
- * journal_recover - recovers a on-disk journal
+ * jbd2_journal_recover - recovers an on-disk journal
* @journal: the journal to recover
*
* The primary function for recovering the log contents when mounting a
@@ -221,7 +221,7 @@ do { \
* blocks. In the third and final pass, we replay any un-revoked blocks
* in the log.
*/
-int journal_recover(journal_t *journal)
+int jbd2_journal_recover(journal_t *journal)
{
int err;
journal_superblock_t * sb;
@@ -260,13 +260,13 @@ int journal_recover(journal_t *journal)
* any existing commit records in the log. */
journal->j_transaction_sequence = ++info.end_transaction;
- journal_clear_revoke(journal);
+ jbd2_journal_clear_revoke(journal);
sync_blockdev(journal->j_fs_dev);
return err;
}
/**
- * journal_skip_recovery - Start journal and wipe exiting records
+ * jbd2_journal_skip_recovery - Start journal and wipe existing records
* @journal: journal to startup
*
* Locate any valid recovery information from the journal and set up the
@@ -278,7 +278,7 @@ int journal_recover(journal_t *journal)
* much recovery information is being erased, and to let us initialise
* the journal transaction sequence numbers to the next unused ID.
*/
-int journal_skip_recovery(journal_t *journal)
+int jbd2_journal_skip_recovery(journal_t *journal)
{
int err;
journal_superblock_t * sb;
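
jbd2_journal_recover() and jbd2_journal_skip_recovery() are normally driven from the journal load and wipe paths rather than called directly by a filesystem. The sketch below only illustrates the choice between the two entry points, assuming the caller already holds an initialised journal_t and decides externally (for example via a mount option) whether replay is wanted.

/* Assumption: journal is initialised; replay_wanted is supplied by the caller. */
static int example_recover_or_skip(journal_t *journal, int replay_wanted)
{
	if (replay_wanted)
		return jbd2_journal_recover(journal);	/* scan, revoke, replay passes */
	return jbd2_journal_skip_recovery(journal);	/* wipe the log without replaying */
}
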
@@ -387,7 +387,7 @@ static int do_one_pass(journal_t *journal,
tmp = (journal_header_t *)bh->b_data;
- if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
+ if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
brelse(bh);
break;
}
@@ -407,7 +407,7 @@ static int do_one_pass(journal_t *journal,
* to do with it? That depends on the pass... */
switch(blocktype) {
- case JFS_DESCRIPTOR_BLOCK:
+ case JBD2_DESCRIPTOR_BLOCK:
/* If it is a valid descriptor block, replay it
* in pass REPLAY; otherwise, just skip over the
* blocks it describes. */
@@ -451,7 +451,7 @@ static int do_one_pass(journal_t *journal,
/* If the block has been
* revoked, then we're all done
* here. */
- if (journal_test_revoke
+ if (jbd2_journal_test_revoke
(journal, blocknr,
next_commit_ID)) {
brelse(obh);
@@ -477,9 +477,9 @@ static int do_one_pass(journal_t *journal,
lock_buffer(nbh);
memcpy(nbh->b_data, obh->b_data,
journal->j_blocksize);
- if (flags & JFS_FLAG_ESCAPE) {
+ if (flags & JBD2_FLAG_ESCAPE) {
*((__be32 *)bh->b_data) =
- cpu_to_be32(JFS_MAGIC_NUMBER);
+ cpu_to_be32(JBD2_MAGIC_NUMBER);
}
BUFFER_TRACE(nbh, "marking dirty");
@@ -495,17 +495,17 @@ static int do_one_pass(journal_t *journal,
skip_write:
tagp += sizeof(journal_block_tag_t);
- if (!(flags & JFS_FLAG_SAME_UUID))
+ if (!(flags & JBD2_FLAG_SAME_UUID))
tagp += 16;
- if (flags & JFS_FLAG_LAST_TAG)
+ if (flags & JBD2_FLAG_LAST_TAG)
break;
}
brelse(bh);
continue;
- case JFS_COMMIT_BLOCK:
+ case JBD2_COMMIT_BLOCK:
/* Found an expected commit block: not much to
* do other than move on to the next sequence
* number. */
@@ -513,7 +513,7 @@ static int do_one_pass(journal_t *journal,
next_commit_ID++;
continue;
- case JFS_REVOKE_BLOCK:
+ case JBD2_REVOKE_BLOCK:
/* If we aren't in the REVOKE pass, then we can
* just skip over this block. */
if (pass != PASS_REVOKE) {
@@ -570,11 +570,11 @@ static int do_one_pass(journal_t *journal,
static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
tid_t sequence, struct recovery_info *info)
{
- journal_revoke_header_t *header;
+ jbd2_journal_revoke_header_t *header;
int offset, max;
- header = (journal_revoke_header_t *) bh->b_data;
- offset = sizeof(journal_revoke_header_t);
+ header = (jbd2_journal_revoke_header_t *) bh->b_data;
+ offset = sizeof(jbd2_journal_revoke_header_t);
max = be32_to_cpu(header->r_count);
while (offset < max) {
@@ -583,7 +583,7 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
offset += 4;
- err = journal_set_revoke(journal, blocknr, sequence);
+ err = jbd2_journal_set_revoke(journal, blocknr, sequence);
if (err)
return err;
++info->nr_revokes;
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index c532429d8d9..2fccddc7aca 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -62,7 +62,7 @@
#else
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -70,14 +70,14 @@
#include <linux/init.h>
#endif
-static kmem_cache_t *revoke_record_cache;
-static kmem_cache_t *revoke_table_cache;
+static kmem_cache_t *jbd2_revoke_record_cache;
+static kmem_cache_t *jbd2_revoke_table_cache;
/* Each revoke record represents one single revoked block. During
journal replay, this involves recording the transaction ID of the
last transaction to revoke this block. */
-struct jbd_revoke_record_s
+struct jbd2_revoke_record_s
{
struct list_head hash;
tid_t sequence; /* Used for recovery only */
@@ -86,7 +86,7 @@ struct jbd_revoke_record_s
/* The revoke table is just a simple hash table of revoke records. */
-struct jbd_revoke_table_s
+struct jbd2_revoke_table_s
{
/* It is conceivable that we might want a larger hash table
* for recovery. Must be a power of two. */
@@ -99,7 +99,7 @@ struct jbd_revoke_table_s
#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
struct journal_head **, int *,
- struct jbd_revoke_record_s *);
+ struct jbd2_revoke_record_s *);
static void flush_descriptor(journal_t *, struct journal_head *, int);
#endif
@@ -108,7 +108,7 @@ static void flush_descriptor(journal_t *, struct journal_head *, int);
/* Borrowed from buffer.c: this is a tried and tested block hash function */
static inline int hash(journal_t *journal, unsigned long block)
{
- struct jbd_revoke_table_s *table = journal->j_revoke;
+ struct jbd2_revoke_table_s *table = journal->j_revoke;
int hash_shift = table->hash_shift;
return ((block << (hash_shift - 6)) ^
@@ -120,10 +120,10 @@ static int insert_revoke_hash(journal_t *journal, unsigned long blocknr,
tid_t seq)
{
struct list_head *hash_list;
- struct jbd_revoke_record_s *record;
+ struct jbd2_revoke_record_s *record;
repeat:
- record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
+ record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
if (!record)
goto oom;
@@ -145,57 +145,57 @@ oom:
/* Find a revoke record in the journal's hash table. */
-static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
+static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
unsigned long blocknr)
{
struct list_head *hash_list;
- struct jbd_revoke_record_s *record;
+ struct jbd2_revoke_record_s *record;
hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
spin_lock(&journal->j_revoke_lock);
- record = (struct jbd_revoke_record_s *) hash_list->next;
+ record = (struct jbd2_revoke_record_s *) hash_list->next;
while (&(record->hash) != hash_list) {
if (record->blocknr == blocknr) {
spin_unlock(&journal->j_revoke_lock);
return record;
}
- record = (struct jbd_revoke_record_s *) record->hash.next;
+ record = (struct jbd2_revoke_record_s *) record->hash.next;
}
spin_unlock(&journal->j_revoke_lock);
return NULL;
}
-int __init journal_init_revoke_caches(void)
+int __init jbd2_journal_init_revoke_caches(void)
{
- revoke_record_cache = kmem_cache_create("revoke_record",
- sizeof(struct jbd_revoke_record_s),
+ jbd2_revoke_record_cache = kmem_cache_create("revoke_record",
+ sizeof(struct jbd2_revoke_record_s),
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (revoke_record_cache == 0)
+ if (jbd2_revoke_record_cache == 0)
return -ENOMEM;
- revoke_table_cache = kmem_cache_create("revoke_table",
- sizeof(struct jbd_revoke_table_s),
+ jbd2_revoke_table_cache = kmem_cache_create("revoke_table",
+ sizeof(struct jbd2_revoke_table_s),
0, 0, NULL, NULL);
- if (revoke_table_cache == 0) {
- kmem_cache_destroy(revoke_record_cache);
- revoke_record_cache = NULL;
+ if (jbd2_revoke_table_cache == 0) {
+ kmem_cache_destroy(jbd2_revoke_record_cache);
+ jbd2_revoke_record_cache = NULL;
return -ENOMEM;
}
return 0;
}
-void journal_destroy_revoke_caches(void)
+void jbd2_journal_destroy_revoke_caches(void)
{
- kmem_cache_destroy(revoke_record_cache);
- revoke_record_cache = NULL;
- kmem_cache_destroy(revoke_table_cache);
- revoke_table_cache = NULL;
+ kmem_cache_destroy(jbd2_revoke_record_cache);
+ jbd2_revoke_record_cache = NULL;
+ kmem_cache_destroy(jbd2_revoke_table_cache);
+ jbd2_revoke_table_cache = NULL;
}
/* Initialise the revoke table for a given journal to a given size. */
-int journal_init_revoke(journal_t *journal, int hash_size)
+int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
{
int shift, tmp;
@@ -206,7 +206,7 @@ int journal_init_revoke(journal_t *journal, int hash_size)
while((tmp >>= 1UL) != 0UL)
shift++;
- journal->j_revoke_table[0] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
+ journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!journal->j_revoke_table[0])
return -ENOMEM;
journal->j_revoke = journal->j_revoke_table[0];
@@ -221,7 +221,7 @@ int journal_init_revoke(journal_t *journal, int hash_size)
journal->j_revoke->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!journal->j_revoke->hash_table) {
- kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+ kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
journal->j_revoke = NULL;
return -ENOMEM;
}
@@ -229,10 +229,10 @@ int journal_init_revoke(journal_t *journal, int hash_size)
for (tmp = 0; tmp < hash_size; tmp++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
- journal->j_revoke_table[1] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
+ journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!journal->j_revoke_table[1]) {
kfree(journal->j_revoke_table[0]->hash_table);
- kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+ kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
return -ENOMEM;
}
@@ -249,8 +249,8 @@ int journal_init_revoke(journal_t *journal, int hash_size)
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!journal->j_revoke->hash_table) {
kfree(journal->j_revoke_table[0]->hash_table);
- kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
- kmem_cache_free(revoke_table_cache, journal->j_revoke_table[1]);
+ kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
+ kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
journal->j_revoke = NULL;
return -ENOMEM;
}
@@ -265,9 +265,9 @@ int journal_init_revoke(journal_t *journal, int hash_size)
/* Destoy a journal's revoke table. The table must already be empty! */
-void journal_destroy_revoke(journal_t *journal)
+void jbd2_journal_destroy_revoke(journal_t *journal)
{
- struct jbd_revoke_table_s *table;
+ struct jbd2_revoke_table_s *table;
struct list_head *hash_list;
int i;
@@ -281,7 +281,7 @@ void journal_destroy_revoke(journal_t *journal)
}
kfree(table->hash_table);
- kmem_cache_free(revoke_table_cache, table);
+ kmem_cache_free(jbd2_revoke_table_cache, table);
journal->j_revoke = NULL;
table = journal->j_revoke_table[1];
@@ -294,7 +294,7 @@ void journal_destroy_revoke(journal_t *journal)
}
kfree(table->hash_table);
- kmem_cache_free(revoke_table_cache, table);
+ kmem_cache_free(jbd2_revoke_table_cache, table);
journal->j_revoke = NULL;
}
@@ -302,7 +302,7 @@ void journal_destroy_revoke(journal_t *journal)
#ifdef __KERNEL__
/*
- * journal_revoke: revoke a given buffer_head from the journal. This
+ * jbd2_journal_revoke: revoke a given buffer_head from the journal. This
* prevents the block from being replayed during recovery if we take a
* crash after this current transaction commits. Any subsequent
* metadata writes of the buffer in this transaction cancel the
@@ -314,18 +314,18 @@ void journal_destroy_revoke(journal_t *journal)
* revoke before clearing the block bitmap when we are deleting
* metadata.
*
- * Revoke performs a journal_forget on any buffer_head passed in as a
+ * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
* parameter, but does _not_ forget the buffer_head if the bh was only
* found implicitly.
*
* bh_in may not be a journalled buffer - it may have come off
* the hash tables without an attached journal_head.
*
- * If bh_in is non-zero, journal_revoke() will decrement its b_count
+ * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
* by one.
*/
-int journal_revoke(handle_t *handle, unsigned long blocknr,
+int jbd2_journal_revoke(handle_t *handle, unsigned long blocknr,
struct buffer_head *bh_in)
{
struct buffer_head *bh = NULL;
@@ -338,7 +338,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
BUFFER_TRACE(bh_in, "enter");
journal = handle->h_transaction->t_journal;
- if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
+ if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
J_ASSERT (!"Cannot set revoke feature!");
return -EINVAL;
}
@@ -386,8 +386,8 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
set_buffer_revoked(bh);
set_buffer_revokevalid(bh);
if (bh_in) {
- BUFFER_TRACE(bh_in, "call journal_forget");
- journal_forget(handle, bh_in);
+ BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
+ jbd2_journal_forget(handle, bh_in);
} else {
BUFFER_TRACE(bh, "call brelse");
__brelse(bh);
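
To make the revoke semantics concrete: the usual caller revokes a deleted metadata block inside the running handle before returning it to the allocator, so that a crash cannot replay stale journal contents on top of reused data. The function below is a sketch; the bitmap update is deliberately left as a comment because it is filesystem-specific and not part of this patch.

/* Sketch of a delete path using jbd2_journal_revoke() as documented above. */
static int example_free_metadata_block(handle_t *handle, unsigned long blocknr,
				       struct buffer_head *bh)
{
	int err;

	/* Revoke first; this also performs jbd2_journal_forget() on bh if non-NULL. */
	err = jbd2_journal_revoke(handle, blocknr, bh);
	if (err)
		return err;

	/*
	 * Only after the revoke record has been queued is it safe to clear
	 * the block's bit in the allocation bitmap (filesystem-specific,
	 * omitted here).
	 */
	return 0;
}
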
@@ -403,7 +403,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
/*
* Cancel an outstanding revoke. For use only internally by the
- * journaling code (called from journal_get_write_access).
+ * journaling code (called from jbd2_journal_get_write_access).
*
* We trust buffer_revoked() on the buffer if the buffer is already
* being journaled: if there is no revoke pending on the buffer, then we
@@ -418,9 +418,9 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
*
* The caller must have the journal locked.
*/
-int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
- struct jbd_revoke_record_s *record;
+ struct jbd2_revoke_record_s *record;
journal_t *journal = handle->h_transaction->t_journal;
int need_cancel;
int did_revoke = 0; /* akpm: debug */
@@ -447,7 +447,7 @@ int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
spin_lock(&journal->j_revoke_lock);
list_del(&record->hash);
spin_unlock(&journal->j_revoke_lock);
- kmem_cache_free(revoke_record_cache, record);
+ kmem_cache_free(jbd2_revoke_record_cache, record);
did_revoke = 1;
}
}
@@ -478,7 +478,7 @@ int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
* we do not want to suspend any processing until all revokes are
* written -bzzz
*/
-void journal_switch_revoke_table(journal_t *journal)
+void jbd2_journal_switch_revoke_table(journal_t *journal)
{
int i;
@@ -498,12 +498,12 @@ void journal_switch_revoke_table(journal_t *journal)
* Called with the journal lock held.
*/
-void journal_write_revoke_records(journal_t *journal,
+void jbd2_journal_write_revoke_records(journal_t *journal,
transaction_t *transaction)
{
struct journal_head *descriptor;
- struct jbd_revoke_record_s *record;
- struct jbd_revoke_table_s *revoke;
+ struct jbd2_revoke_record_s *record;
+ struct jbd2_revoke_table_s *revoke;
struct list_head *hash_list;
int i, offset, count;
@@ -519,14 +519,14 @@ void journal_write_revoke_records(journal_t *journal,
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
- record = (struct jbd_revoke_record_s *)
+ record = (struct jbd2_revoke_record_s *)
hash_list->next;
write_one_revoke_record(journal, transaction,
&descriptor, &offset,
record);
count++;
list_del(&record->hash);
- kmem_cache_free(revoke_record_cache, record);
+ kmem_cache_free(jbd2_revoke_record_cache, record);
}
}
if (descriptor)
@@ -543,7 +543,7 @@ static void write_one_revoke_record(journal_t *journal,
transaction_t *transaction,
struct journal_head **descriptorp,
int *offsetp,
- struct jbd_revoke_record_s *record)
+ struct jbd2_revoke_record_s *record)
{
struct journal_head *descriptor;
int offset;
@@ -551,7 +551,7 @@ static void write_one_revoke_record(journal_t *journal,
/* If we are already aborting, this all becomes a noop. We
still need to go round the loop in
- journal_write_revoke_records in order to free all of the
+ jbd2_journal_write_revoke_records in order to free all of the
revoke records: only the IO to the journal is omitted. */
if (is_journal_aborted(journal))
return;
@@ -568,19 +568,19 @@ static void write_one_revoke_record(journal_t *journal,
}
if (!descriptor) {
- descriptor = journal_get_descriptor_buffer(journal);
+ descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor)
return;
header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
- header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
- header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
+ header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+ header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
header->h_sequence = cpu_to_be32(transaction->t_tid);
/* Record it so that we can wait for IO completion later */
JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
- journal_file_buffer(descriptor, transaction, BJ_LogCtl);
+ jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
- offset = sizeof(journal_revoke_header_t);
+ offset = sizeof(jbd2_journal_revoke_header_t);
*descriptorp = descriptor;
}
@@ -601,7 +601,7 @@ static void flush_descriptor(journal_t *journal,
struct journal_head *descriptor,
int offset)
{
- journal_revoke_header_t *header;
+ jbd2_journal_revoke_header_t *header;
struct buffer_head *bh = jh2bh(descriptor);
if (is_journal_aborted(journal)) {
@@ -609,7 +609,7 @@ static void flush_descriptor(journal_t *journal,
return;
}
- header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
+ header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
header->r_count = cpu_to_be32(offset);
set_buffer_jwrite(bh);
BUFFER_TRACE(bh, "write");
@@ -640,11 +640,11 @@ static void flush_descriptor(journal_t *journal,
* single block.
*/
-int journal_set_revoke(journal_t *journal,
+int jbd2_journal_set_revoke(journal_t *journal,
unsigned long blocknr,
tid_t sequence)
{
- struct jbd_revoke_record_s *record;
+ struct jbd2_revoke_record_s *record;
record = find_revoke_record(journal, blocknr);
if (record) {
@@ -664,11 +664,11 @@ int journal_set_revoke(journal_t *journal,
* ones, but later transactions still need replayed.
*/
-int journal_test_revoke(journal_t *journal,
+int jbd2_journal_test_revoke(journal_t *journal,
unsigned long blocknr,
tid_t sequence)
{
- struct jbd_revoke_record_s *record;
+ struct jbd2_revoke_record_s *record;
record = find_revoke_record(journal, blocknr);
if (!record)
@@ -683,21 +683,21 @@ int journal_test_revoke(journal_t *journal,
* that it can be reused by the running filesystem.
*/
-void journal_clear_revoke(journal_t *journal)
+void jbd2_journal_clear_revoke(journal_t *journal)
{
int i;
struct list_head *hash_list;
- struct jbd_revoke_record_s *record;
- struct jbd_revoke_table_s *revoke;
+ struct jbd2_revoke_record_s *record;
+ struct jbd2_revoke_table_s *revoke;
revoke = journal->j_revoke;
for (i = 0; i < revoke->hash_size; i++) {
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
- record = (struct jbd_revoke_record_s*) hash_list->next;
+ record = (struct jbd2_revoke_record_s*) hash_list->next;
list_del(&record->hash);
- kmem_cache_free(revoke_record_cache, record);
+ kmem_cache_free(jbd2_revoke_record_cache, record);
}
}
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e1b3c8af4d1..149957bef90 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -19,7 +19,7 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -28,7 +28,7 @@
#include <linux/highmem.h>
/*
- * get_transaction: obtain a new transaction_t object.
+ * jbd2_get_transaction: obtain a new transaction_t object.
*
* Simply allocate and initialise a new transaction. Create it in
* RUNNING state and add it to the current journal (which should not
@@ -44,7 +44,7 @@
*/
static transaction_t *
-get_transaction(journal_t *journal, transaction_t *transaction)
+jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
transaction->t_journal = journal;
transaction->t_state = T_RUNNING;
@@ -115,7 +115,7 @@ repeat:
spin_lock(&journal->j_state_lock);
repeat_locked:
if (is_journal_aborted(journal) ||
- (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
+ (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
spin_unlock(&journal->j_state_lock);
ret = -EROFS;
goto out;
@@ -134,7 +134,7 @@ repeat_locked:
spin_unlock(&journal->j_state_lock);
goto alloc_transaction;
}
- get_transaction(journal, new_transaction);
+ jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
@@ -175,7 +175,7 @@ repeat_locked:
spin_unlock(&transaction->t_handle_lock);
prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
- __log_start_commit(journal, transaction->t_tid);
+ __jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
@@ -205,12 +205,12 @@ repeat_locked:
* committing_transaction->t_outstanding_credits plus "enough" for
* the log control blocks.
* Also, this test is inconsitent with the matching one in
- * journal_extend().
+ * jbd2_journal_extend().
*/
- if (__log_space_left(journal) < jbd_space_needed(journal)) {
+ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
spin_unlock(&transaction->t_handle_lock);
- __log_wait_for_space(journal);
+ __jbd2_log_wait_for_space(journal);
goto repeat_locked;
}
@@ -223,7 +223,7 @@ repeat_locked:
transaction->t_handle_count++;
jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
handle, nblocks, transaction->t_outstanding_credits,
- __log_space_left(journal));
+ __jbd2_log_space_left(journal));
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
out:
@@ -246,7 +246,7 @@ static handle_t *new_handle(int nblocks)
}
/**
- * handle_t *journal_start() - Obtain a new handle.
+ * handle_t *jbd2_journal_start() - Obtain a new handle.
* @journal: Journal to start transaction on.
* @nblocks: number of block buffer we might modify
*
@@ -259,7 +259,7 @@ static handle_t *new_handle(int nblocks)
*
* Return a pointer to a newly allocated handle, or NULL on failure
*/
-handle_t *journal_start(journal_t *journal, int nblocks)
+handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
handle_t *handle = journal_current_handle();
int err;
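
The handle obtained here anchors the standard JBD2 update cycle: start a handle with enough buffer credits, declare write access on each metadata buffer, mark it dirty, then stop the handle. Below is a minimal sketch of a single-buffer update, using only functions defined in this file; error handling is abbreviated.

/* Minimal sketch of one journaled metadata update. */
static int example_update_one_buffer(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err, err2;

	handle = jbd2_journal_start(journal, 1);	/* one buffer credit */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle here ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	err2 = jbd2_journal_stop(handle);		/* may report -EIO after an abort */
	return err ? err : err2;
}
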
@@ -289,7 +289,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
}
/**
- * int journal_extend() - extend buffer credits.
+ * int jbd2_journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
* @nblocks: nr blocks to try to extend by.
*
@@ -298,7 +298,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
* a credit for a number of buffer modications in advance, but can
* extend its credit if it needs more.
*
- * journal_extend tries to give the running handle more buffer credits.
+ * jbd2_journal_extend tries to give the running handle more buffer credits.
* It does not guarantee that allocation - this is a best-effort only.
* The calling process MUST be able to deal cleanly with a failure to
* extend here.
@@ -308,7 +308,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
* return code < 0 implies an error
* return code > 0 implies normal transaction-full status.
*/
-int journal_extend(handle_t *handle, int nblocks)
+int jbd2_journal_extend(handle_t *handle, int nblocks)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
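
Because jbd2_journal_extend() is only best-effort, the documented pattern is to fall back to jbd2_journal_restart() (defined further below) when the extension is refused. A hedged sketch of that fallback:

/* Try to grow the current handle; otherwise restart it in a new transaction. */
static int example_ensure_credits(handle_t *handle, int nblocks)
{
	int err = jbd2_journal_extend(handle, nblocks);

	if (err > 0)			/* transaction full: extension denied */
		err = jbd2_journal_restart(handle, nblocks);
	return err;			/* 0 on success, < 0 on error */
}
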
@@ -339,7 +339,7 @@ int journal_extend(handle_t *handle, int nblocks)
goto unlock;
}
- if (wanted > __log_space_left(journal)) {
+ if (wanted > __jbd2_log_space_left(journal)) {
jbd_debug(3, "denied handle %p %d blocks: "
"insufficient log space\n", handle, nblocks);
goto unlock;
@@ -360,21 +360,21 @@ out:
/**
- * int journal_restart() - restart a handle .
+ * int jbd2_journal_restart() - restart a handle .
* @handle: handle to restart
* @nblocks: nr credits requested
*
* Restart a handle for a multi-transaction filesystem
* operation.
*
- * If the journal_extend() call above fails to grant new buffer credits
- * to a running handle, a call to journal_restart will commit the
+ * If the jbd2_journal_extend() call above fails to grant new buffer credits
+ * to a running handle, a call to jbd2_journal_restart will commit the
* handle's transaction so far and reattach the handle to a new
* transaction capabable of guaranteeing the requested number of
* credits.
*/
-int journal_restart(handle_t *handle, int nblocks)
+int jbd2_journal_restart(handle_t *handle, int nblocks)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@@ -402,7 +402,7 @@ int journal_restart(handle_t *handle, int nblocks)
spin_unlock(&transaction->t_handle_lock);
jbd_debug(2, "restarting handle %p\n", handle);
- __log_start_commit(journal, transaction->t_tid);
+ __jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
handle->h_buffer_credits = nblocks;
@@ -412,7 +412,7 @@ int journal_restart(handle_t *handle, int nblocks)
/**
- * void journal_lock_updates () - establish a transaction barrier.
+ * void jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
*
* This locks out any further updates from being started, and blocks
@@ -421,7 +421,7 @@ int journal_restart(handle_t *handle, int nblocks)
*
* The journal lock should not be held on entry.
*/
-void journal_lock_updates(journal_t *journal)
+void jbd2_journal_lock_updates(journal_t *journal)
{
DEFINE_WAIT(wait);
@@ -452,7 +452,7 @@ void journal_lock_updates(journal_t *journal)
/*
* We have now established a barrier against other normal updates, but
- * we also need to barrier against other journal_lock_updates() calls
+ * we also need to barrier against other jbd2_journal_lock_updates() calls
* to make sure that we serialise special journal-locked operations
* too.
*/
@@ -460,14 +460,14 @@ void journal_lock_updates(journal_t *journal)
}
/**
- * void journal_unlock_updates (journal_t* journal) - release barrier
+ * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
* @journal: Journal to release the barrier on.
*
- * Release a transaction barrier obtained with journal_lock_updates().
+ * Release a transaction barrier obtained with jbd2_journal_lock_updates().
*
* Should be called without the journal lock held.
*/
-void journal_unlock_updates (journal_t *journal)
+void jbd2_journal_unlock_updates (journal_t *journal)
{
J_ASSERT(journal->j_barrier_count != 0);
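
The barrier pair above is used when a caller needs the journal quiesced, for example around a journal flush or an on-line filesystem freeze. A minimal sketch of the pairing follows; the work done inside the barrier is illustrative only.

/* Quiesce all updates, perform a journal-locked operation, then release the barrier. */
static void example_with_updates_locked(journal_t *journal)
{
	jbd2_journal_lock_updates(journal);	/* waits for all live handles to finish */

	/* ... operate with no transactions running ... */

	jbd2_journal_unlock_updates(journal);
}
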
@@ -667,7 +667,7 @@ repeat:
JBUFFER_TRACE(jh, "allocate memory for buffer");
jbd_unlock_bh_state(bh);
frozen_buffer =
- jbd_slab_alloc(jh2bh(jh)->b_size,
+ jbd2_slab_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
printk(KERN_EMERG
@@ -699,7 +699,7 @@ repeat:
jh->b_transaction = transaction;
JBUFFER_TRACE(jh, "file as BJ_Reserved");
spin_lock(&journal->j_list_lock);
- __journal_file_buffer(jh, transaction, BJ_Reserved);
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
spin_unlock(&journal->j_list_lock);
}
@@ -723,18 +723,18 @@ done:
* If we are about to journal a buffer, then any revoke pending on it is
* no longer valid
*/
- journal_cancel_revoke(handle, jh);
+ jbd2_journal_cancel_revoke(handle, jh);
out:
if (unlikely(frozen_buffer)) /* It's usually NULL */
- jbd_slab_free(frozen_buffer, bh->b_size);
+ jbd2_slab_free(frozen_buffer, bh->b_size);
JBUFFER_TRACE(jh, "exit");
return error;
}
/**
- * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
+ * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
* @handle: transaction to add buffer modifications to
* @bh: bh to be used for metadata writes
* @credits: variable that will receive credits for the buffer
@@ -745,16 +745,16 @@ out:
* because we're write()ing a buffer which is also part of a shared mapping.
*/
-int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
- struct journal_head *jh = journal_add_journal_head(bh);
+ struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int rc;
/* We do not want to get caught playing with fields which the
* log thread also manipulates. Make sure that the buffer
* completes any outstanding IO before proceeding. */
rc = do_get_write_access(handle, jh, 0);
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
return rc;
}
@@ -772,17 +772,17 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
* unlocked buffer beforehand. */
/**
- * int journal_get_create_access () - notify intent to use newly created bh
+ * int jbd2_journal_get_create_access () - notify intent to use newly created bh
* @handle: transaction to new buffer to
* @bh: new buffer.
*
* Call this if you create a new bh.
*/
-int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
- struct journal_head *jh = journal_add_journal_head(bh);
+ struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int err;
jbd_debug(5, "journal_head %p\n", jh);
@@ -812,7 +812,7 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == NULL) {
jh->b_transaction = transaction;
JBUFFER_TRACE(jh, "file as BJ_Reserved");
- __journal_file_buffer(jh, transaction, BJ_Reserved);
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
} else if (jh->b_transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "set next transaction");
jh->b_next_transaction = transaction;
@@ -828,14 +828,14 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* which hits an assertion error.
*/
JBUFFER_TRACE(jh, "cancelling revoke");
- journal_cancel_revoke(handle, jh);
- journal_put_journal_head(jh);
+ jbd2_journal_cancel_revoke(handle, jh);
+ jbd2_journal_put_journal_head(jh);
out:
return err;
}
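
A typical caller of jbd2_journal_get_create_access() has just allocated a brand-new metadata block: it maps a buffer_head, declares create access, initialises the contents and then dirties the metadata. The sb_getblk()-based sketch below describes that pattern under stated assumptions; it is not code from this patch.

/* Sketch: initialise a freshly allocated metadata block inside a handle. */
static struct buffer_head *example_new_metadata_block(handle_t *handle,
						      struct super_block *sb,
						      sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);
	int err;

	if (!bh)
		return NULL;

	err = jbd2_journal_get_create_access(handle, bh);
	if (err)
		goto fail;

	lock_buffer(bh);
	memset(bh->b_data, 0, sb->s_blocksize);	/* brand-new block: wipe old contents */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = jbd2_journal_dirty_metadata(handle, bh);
	if (err)
		goto fail;
	return bh;

fail:
	brelse(bh);
	return NULL;
}
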
/**
- * int journal_get_undo_access() - Notify intent to modify metadata with
+ * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
* non-rewindable consequences
* @handle: transaction
* @bh: buffer to undo
@@ -848,7 +848,7 @@ out:
* since if we overwrote that space we would make the delete
* un-rewindable in case of a crash.
*
- * To deal with that, journal_get_undo_access requests write access to a
+ * To deal with that, jbd2_journal_get_undo_access requests write access to a
* buffer for parts of non-rewindable operations such as delete
* operations on the bitmaps. The journaling code must keep a copy of
* the buffer's contents prior to the undo_access call until such time
@@ -861,10 +861,10 @@ out:
*
* Returns error number or 0 on success.
*/
-int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
int err;
- struct journal_head *jh = journal_add_journal_head(bh);
+ struct journal_head *jh = jbd2_journal_add_journal_head(bh);
char *committed_data = NULL;
JBUFFER_TRACE(jh, "entry");
@@ -880,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
repeat:
if (!jh->b_committed_data) {
- committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+ committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
printk(KERN_EMERG "%s: No memory for committed data\n",
__FUNCTION__);
@@ -905,14 +905,14 @@ repeat:
}
jbd_unlock_bh_state(bh);
out:
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
if (unlikely(committed_data))
- jbd_slab_free(committed_data, bh->b_size);
+ jbd2_slab_free(committed_data, bh->b_size);
return err;
}
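
As the comment above says, the main user of undo access is the delete path for allocation bitmaps: the handle declares undo access before clearing bits so that the committed copy of the bitmap survives until the transaction commits. A hedged sketch follows; the actual bit manipulation is filesystem-specific and left as a comment.

/* Sketch: update an allocation bitmap buffer under undo protection. */
static int example_update_bitmap(handle_t *handle, struct buffer_head *bitmap_bh)
{
	int err = jbd2_journal_get_undo_access(handle, bitmap_bh);

	if (err)
		return err;

	/* ... clear the relevant bits in bitmap_bh->b_data here
	 *     (real filesystems use their own endian-safe bitmap helpers) ... */

	return jbd2_journal_dirty_metadata(handle, bitmap_bh);
}
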
/**
- * int journal_dirty_data() - mark a buffer as containing dirty data which
+ * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
* needs to be flushed before we can commit the
* current transaction.
* @handle: transaction
@@ -923,10 +923,10 @@ out:
*
* Returns error number or 0 on success.
*
- * journal_dirty_data() can be called via page_launder->ext3_writepage
+ * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
* by kswapd.
*/
-int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
journal_t *journal = handle->h_transaction->t_journal;
int need_brelse = 0;
@@ -935,7 +935,7 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
if (is_handle_aborted(handle))
return 0;
- jh = journal_add_journal_head(bh);
+ jh = jbd2_journal_add_journal_head(bh);
JBUFFER_TRACE(jh, "entry");
/*
@@ -984,7 +984,7 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
* And while we're in that state, someone does a
* writepage() in an attempt to pageout the same area
* of the file via a shared mapping. At present that
- * calls journal_dirty_data(), and we get right here.
+ * calls jbd2_journal_dirty_data(), and we get right here.
* It may be too late to journal the data. Simply
* falling through to the next test will suffice: the
* data will be dirty and wil be checkpointed. The
@@ -1035,7 +1035,7 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
/* journal_clean_data_list() may have got there first */
if (jh->b_transaction != NULL) {
JBUFFER_TRACE(jh, "unfile from commit");
- __journal_temp_unlink_buffer(jh);
+ __jbd2_journal_temp_unlink_buffer(jh);
/* It still points to the committing
* transaction; move it to this one so
* that the refile assert checks are
@@ -1054,15 +1054,15 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
JBUFFER_TRACE(jh, "not on correct data list: unfile");
J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
- __journal_temp_unlink_buffer(jh);
+ __jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = handle->h_transaction;
JBUFFER_TRACE(jh, "file as data");
- __journal_file_buffer(jh, handle->h_transaction,
+ __jbd2_journal_file_buffer(jh, handle->h_transaction,
BJ_SyncData);
}
} else {
JBUFFER_TRACE(jh, "not on a transaction");
- __journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
+ __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
}
no_journal:
spin_unlock(&journal->j_list_lock);
@@ -1072,12 +1072,12 @@ no_journal:
__brelse(bh);
}
JBUFFER_TRACE(jh, "exit");
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
return 0;
}
/**
- * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
+ * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
* @handle: transaction to add buffer to.
* @bh: buffer to mark
*
@@ -1095,7 +1095,7 @@ no_journal:
* buffer: that only gets done when the old transaction finally
* completes its commit.
*/
-int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@@ -1156,7 +1156,7 @@ int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "file as BJ_Metadata");
spin_lock(&journal->j_list_lock);
- __journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
+ __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
spin_unlock(&journal->j_list_lock);
out_unlock_bh:
jbd_unlock_bh_state(bh);
@@ -1166,18 +1166,18 @@ out:
}
/*
- * journal_release_buffer: undo a get_write_access without any buffer
+ * jbd2_journal_release_buffer: undo a get_write_access without any buffer
* updates, if the update decided in the end that it didn't need access.
*
*/
void
-journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
BUFFER_TRACE(bh, "entry");
}
/**
- * void journal_forget() - bforget() for potentially-journaled buffers.
+ * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
* @handle: transaction handle
* @bh: bh to 'forget'
*
@@ -1193,7 +1193,7 @@ journal_release_buffer(handle_t *handle, struct buffer_head *bh)
* Allow this call even if the handle has aborted --- it may be part of
* the caller's cleanup after an abort.
*/
-int journal_forget (handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@@ -1250,11 +1250,11 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
*/
if (jh->b_cp_transaction) {
- __journal_temp_unlink_buffer(jh);
- __journal_file_buffer(jh, transaction, BJ_Forget);
+ __jbd2_journal_temp_unlink_buffer(jh);
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
} else {
- __journal_unfile_buffer(jh);
- journal_remove_journal_head(bh);
+ __jbd2_journal_unfile_buffer(jh);
+ jbd2_journal_remove_journal_head(bh);
__brelse(bh);
if (!buffer_jbd(bh)) {
spin_unlock(&journal->j_list_lock);
@@ -1292,7 +1292,7 @@ drop:
}
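
Modelled loosely on how ext3-style filesystems wrap these calls when a block is released: freed metadata normally wants a revoke record (which itself forgets the buffer), while a buffer that was only journaled in this handle can simply be forgotten. This chooser is an illustration, not code from this patch.

/* Sketch: drop a buffer whose block is being deallocated. */
static int example_forget_or_revoke(handle_t *handle, int is_metadata,
				    unsigned long blocknr, struct buffer_head *bh)
{
	if (is_metadata)
		return jbd2_journal_revoke(handle, blocknr, bh);
	return jbd2_journal_forget(handle, bh);		/* replaces a plain bforget(bh) */
}
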
/**
- * int journal_stop() - complete a transaction
+ * int jbd2_journal_stop() - complete a transaction
* @handle: tranaction to complete.
*
* All done for a particular handle.
@@ -1302,12 +1302,12 @@ drop:
* complication is that we need to start a commit operation if the
* filesystem is marked for synchronous update.
*
- * journal_stop itself will not usually return an error, but it may
+ * jbd2_journal_stop itself will not usually return an error, but it may
* do so in unusual circumstances. In particular, expect it to
- * return -EIO if a journal_abort has been executed since the
+ * return -EIO if a jbd2_journal_abort has been executed since the
* transaction began.
*/
-int journal_stop(handle_t *handle)
+int jbd2_journal_stop(handle_t *handle)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@@ -1383,15 +1383,15 @@ int journal_stop(handle_t *handle)
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
- __log_start_commit(journal, transaction->t_tid);
+ __jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
/*
- * Special case: JFS_SYNC synchronous updates require us
+ * Special case: JBD2_SYNC synchronous updates require us
* to wait for the commit to complete.
*/
if (handle->h_sync && !(current->flags & PF_MEMALLOC))
- err = log_wait_commit(journal, tid);
+ err = jbd2_log_wait_commit(journal, tid);
} else {
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
@@ -1401,24 +1401,24 @@ int journal_stop(handle_t *handle)
return err;
}
-/**int journal_force_commit() - force any uncommitted transactions
+/**int jbd2_journal_force_commit() - force any uncommitted transactions
* @journal: journal to force
*
* For synchronous operations: force any uncommitted transactions
* to disk. May seem kludgy, but it reuses all the handle batching
* code in a very simple manner.
*/
-int journal_force_commit(journal_t *journal)
+int jbd2_journal_force_commit(journal_t *journal)
{
handle_t *handle;
int ret;
- handle = journal_start(journal, 1);
+ handle = jbd2_journal_start(journal, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
} else {
handle->h_sync = 1;
- ret = journal_stop(handle);
+ ret = jbd2_journal_stop(handle);
}
return ret;
}
@@ -1486,7 +1486,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
*
* Called under j_list_lock. The journal may not be locked.
*/
-void __journal_temp_unlink_buffer(struct journal_head *jh)
+void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
struct journal_head **list = NULL;
transaction_t *transaction;
@@ -1538,23 +1538,23 @@ void __journal_temp_unlink_buffer(struct journal_head *jh)
mark_buffer_dirty(bh); /* Expose it to the VM */
}
-void __journal_unfile_buffer(struct journal_head *jh)
+void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
- __journal_temp_unlink_buffer(jh);
+ __jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
}
-void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
+void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
jbd_lock_bh_state(jh2bh(jh));
spin_lock(&journal->j_list_lock);
- __journal_unfile_buffer(jh);
+ __jbd2_journal_unfile_buffer(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(jh2bh(jh));
}
/*
- * Called from journal_try_to_free_buffers().
+ * Called from jbd2_journal_try_to_free_buffers().
*
* Called under jbd_lock_bh_state(bh)
*/
@@ -1576,16 +1576,16 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
/* A written-back ordered data buffer */
JBUFFER_TRACE(jh, "release data");
- __journal_unfile_buffer(jh);
- journal_remove_journal_head(bh);
+ __jbd2_journal_unfile_buffer(jh);
+ jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
} else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
/* written-back checkpointed metadata buffer */
if (jh->b_jlist == BJ_None) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- __journal_remove_checkpoint(jh);
- journal_remove_journal_head(bh);
+ __jbd2_journal_remove_checkpoint(jh);
+ jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
}
@@ -1596,7 +1596,7 @@ out:
/**
- * int journal_try_to_free_buffers() - try to free page buffers.
+ * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
* @page: to try and free
* @unused_gfp_mask: unused
@@ -1613,13 +1613,13 @@ out:
*
* This complicates JBD locking somewhat. We aren't protected by the
* BKL here. We wish to remove the buffer from its committing or
- * running transaction's ->t_datalist via __journal_unfile_buffer.
+ * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
*
* This may *change* the value of transaction_t->t_datalist, so anyone
* who looks at t_datalist needs to lock against this function.
*
- * Even worse, someone may be doing a journal_dirty_data on this
- * buffer. So we need to lock against that. journal_dirty_data()
+ * Even worse, someone may be doing a jbd2_journal_dirty_data on this
+ * buffer. So we need to lock against that. jbd2_journal_dirty_data()
* will come out of the lock with the buffer dirty, which makes it
* ineligible for release here.
*
@@ -1629,7 +1629,7 @@ out:
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*/
-int journal_try_to_free_buffers(journal_t *journal,
+int jbd2_journal_try_to_free_buffers(journal_t *journal,
struct page *page, gfp_t unused_gfp_mask)
{
struct buffer_head *head;
@@ -1646,15 +1646,15 @@ int journal_try_to_free_buffers(journal_t *journal,
/*
* We take our own ref against the journal_head here to avoid
* having to add tons of locking around each instance of
- * journal_remove_journal_head() and journal_put_journal_head().
+ * jbd2_journal_remove_journal_head() and jbd2_journal_put_journal_head().
*/
- jh = journal_grab_journal_head(bh);
+ jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
continue;
jbd_lock_bh_state(bh);
__journal_try_to_free_buffer(journal, bh);
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
jbd_unlock_bh_state(bh);
if (buffer_jbd(bh))
goto busy;
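
(Aside, not part of the patch: a minimal sketch of how an address_space ->releasepage hook might delegate page release decisions to this routine. EXAMPLE_JOURNAL is a hypothetical accessor for the inode's journal.)

/* Illustrative only: let JBD2 decide whether the page's buffers can go. */
static int example_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXAMPLE_JOURNAL(page->mapping->host);	/* hypothetical */

	if (!page_has_buffers(page))
		return 0;
	return jbd2_journal_try_to_free_buffers(journal, page, wait);
}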
@@ -1681,23 +1681,23 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
int may_free = 1;
struct buffer_head *bh = jh2bh(jh);
- __journal_unfile_buffer(jh);
+ __jbd2_journal_unfile_buffer(jh);
if (jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "on running+cp transaction");
- __journal_file_buffer(jh, transaction, BJ_Forget);
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
clear_buffer_jbddirty(bh);
may_free = 0;
} else {
JBUFFER_TRACE(jh, "on running transaction");
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
__brelse(bh);
}
return may_free;
}
/*
- * journal_invalidatepage
+ * jbd2_journal_invalidatepage
*
* This code is tricky. It has a number of cases to deal with.
*
@@ -1765,7 +1765,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
- jh = journal_grab_journal_head(bh);
+ jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
goto zap_buffer_no_jh;
@@ -1796,7 +1796,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
ret = __dispose_buffer(jh,
journal->j_running_transaction);
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
@@ -1810,7 +1810,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
JBUFFER_TRACE(jh, "give to committing trans");
ret = __dispose_buffer(jh,
journal->j_committing_transaction);
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
@@ -1844,7 +1844,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
journal->j_running_transaction);
jh->b_next_transaction = NULL;
}
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
@@ -1861,7 +1861,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
}
zap_buffer:
- journal_put_journal_head(jh);
+ jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
@@ -1877,7 +1877,7 @@ zap_buffer_unlocked:
}
/**
- * void journal_invalidatepage()
+ * void jbd2_journal_invalidatepage()
* @journal: journal to use for flush...
* @page: page to flush
 * @offset: offset within the page; buffers after this offset are invalidated.
@@ -1885,7 +1885,7 @@ zap_buffer_unlocked:
* Reap page buffers containing data after offset in page.
*
*/
-void journal_invalidatepage(journal_t *journal,
+void jbd2_journal_invalidatepage(journal_t *journal,
struct page *page,
unsigned long offset)
{
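
(Aside, not part of the patch: a hypothetical ->invalidatepage hook would simply forward to this function once it has the journal in hand; EXAMPLE_JOURNAL is an illustrative accessor.)

/* Illustrative only: reap journaled buffers past 'offset' on truncate. */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXAMPLE_JOURNAL(page->mapping->host);	/* hypothetical */

	jbd2_journal_invalidatepage(journal, page, offset);
}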
@@ -1927,7 +1927,7 @@ void journal_invalidatepage(journal_t *journal,
/*
* File a buffer on the given transaction list.
*/
-void __journal_file_buffer(struct journal_head *jh,
+void __jbd2_journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
{
struct journal_head **list = NULL;
@@ -1956,7 +1956,7 @@ void __journal_file_buffer(struct journal_head *jh,
}
if (jh->b_transaction)
- __journal_temp_unlink_buffer(jh);
+ __jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = transaction;
switch (jlist) {
@@ -1998,12 +1998,12 @@ void __journal_file_buffer(struct journal_head *jh,
set_buffer_jbddirty(bh);
}
-void journal_file_buffer(struct journal_head *jh,
+void jbd2_journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
{
jbd_lock_bh_state(jh2bh(jh));
spin_lock(&transaction->t_journal->j_list_lock);
- __journal_file_buffer(jh, transaction, jlist);
+ __jbd2_journal_file_buffer(jh, transaction, jlist);
spin_unlock(&transaction->t_journal->j_list_lock);
jbd_unlock_bh_state(jh2bh(jh));
}
@@ -2018,7 +2018,7 @@ void journal_file_buffer(struct journal_head *jh,
*
* Called under jbd_lock_bh_state(jh2bh(jh))
*/
-void __journal_refile_buffer(struct journal_head *jh)
+void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
int was_dirty;
struct buffer_head *bh = jh2bh(jh);
@@ -2029,7 +2029,7 @@ void __journal_refile_buffer(struct journal_head *jh)
/* If the buffer is now unused, just drop it. */
if (jh->b_next_transaction == NULL) {
- __journal_unfile_buffer(jh);
+ __jbd2_journal_unfile_buffer(jh);
return;
}
@@ -2039,10 +2039,10 @@ void __journal_refile_buffer(struct journal_head *jh)
*/
was_dirty = test_clear_buffer_jbddirty(bh);
- __journal_temp_unlink_buffer(jh);
+ __jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
- __journal_file_buffer(jh, jh->b_transaction,
+ __jbd2_journal_file_buffer(jh, jh->b_transaction,
was_dirty ? BJ_Metadata : BJ_Reserved);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
@@ -2054,26 +2054,26 @@ void __journal_refile_buffer(struct journal_head *jh)
* For the unlocked version of this call, also make sure that any
* hanging journal_head is cleaned up if necessary.
*
- * __journal_refile_buffer is usually called as part of a single locked
+ * __jbd2_journal_refile_buffer is usually called as part of a single locked
* operation on a buffer_head, in which the caller is probably going to
* be hooking the journal_head onto other lists. In that case it is up
* to the caller to remove the journal_head if necessary. For the
- * unlocked journal_refile_buffer call, the caller isn't going to be
+ * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
* doing anything else to the buffer so we need to do the cleanup
* ourselves to avoid a jh leak.
*
* *** The journal_head may be freed by this call! ***
*/
-void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
+void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
- __journal_refile_buffer(jh);
+ __jbd2_journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
+ jbd2_journal_remove_journal_head(bh);
spin_unlock(&journal->j_list_lock);
__brelse(bh);
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index 3dbf6c77903..99d37557cbb 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -1,5 +1,5 @@
/*
- * linux/include/linux/ext4_jbd.h
+ * linux/include/linux/ext4_jbd2.h
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
@@ -16,7 +16,7 @@
#define _LINUX_EXT4_JBD_H
#include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal)
@@ -116,7 +116,7 @@ static inline int
__ext4_journal_get_undo_access(const char *where, handle_t *handle,
struct buffer_head *bh)
{
- int err = journal_get_undo_access(handle, bh);
+ int err = jbd2_journal_get_undo_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@@ -126,7 +126,7 @@ static inline int
__ext4_journal_get_write_access(const char *where, handle_t *handle,
struct buffer_head *bh)
{
- int err = journal_get_write_access(handle, bh);
+ int err = jbd2_journal_get_write_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@@ -135,13 +135,13 @@ __ext4_journal_get_write_access(const char *where, handle_t *handle,
static inline void
ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
- journal_release_buffer(handle, bh);
+ jbd2_journal_release_buffer(handle, bh);
}
static inline int
__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
{
- int err = journal_forget(handle, bh);
+ int err = jbd2_journal_forget(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@@ -151,7 +151,7 @@ static inline int
__ext4_journal_revoke(const char *where, handle_t *handle,
unsigned long blocknr, struct buffer_head *bh)
{
- int err = journal_revoke(handle, blocknr, bh);
+ int err = jbd2_journal_revoke(handle, blocknr, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@@ -161,7 +161,7 @@ static inline int
__ext4_journal_get_create_access(const char *where,
handle_t *handle, struct buffer_head *bh)
{
- int err = journal_get_create_access(handle, bh);
+ int err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@@ -171,7 +171,7 @@ static inline int
__ext4_journal_dirty_metadata(const char *where,
handle_t *handle, struct buffer_head *bh)
{
- int err = journal_dirty_metadata(handle, bh);
+ int err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
return err;
@@ -211,22 +211,22 @@ static inline handle_t *ext4_journal_current_handle(void)
static inline int ext4_journal_extend(handle_t *handle, int nblocks)
{
- return journal_extend(handle, nblocks);
+ return jbd2_journal_extend(handle, nblocks);
}
static inline int ext4_journal_restart(handle_t *handle, int nblocks)
{
- return journal_restart(handle, nblocks);
+ return jbd2_journal_restart(handle, nblocks);
}
static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
- return journal_blocks_per_page(inode);
+ return jbd2_journal_blocks_per_page(inode);
}
static inline int ext4_journal_force_commit(journal_t *journal)
{
- return journal_force_commit(journal);
+ return jbd2_journal_force_commit(journal);
}
/* super.c */
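
(Aside, not part of the patch: the ext4 wrappers above all follow one pattern, namely call the renamed jbd2_* primitive and abort the handle on error. A minimal sketch of a typical metadata update built directly on the two access/dirty helpers shown in this hunk; example_update_block is a hypothetical function.)

/* Illustrative only: get write access, modify, then mark the buffer dirty. */
static int example_update_block(handle_t *handle, struct buffer_head *bh)
{
	int err = __ext4_journal_get_write_access(__FUNCTION__, handle, bh);

	if (err)
		return err;
	/* ... modify bh->b_data while covered by the handle ... */
	return __ext4_journal_dirty_metadata(__FUNCTION__, handle, bh);
}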
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index fe89444b1c6..3251f7abb57 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1,5 +1,5 @@
/*
- * linux/include/linux/jbd.h
+ * linux/include/linux/jbd2.h
*
* Written by Stephen C. Tweedie <sct@redhat.com>
*
@@ -19,7 +19,7 @@
/* Allow this file to be included directly into e2fsprogs */
#ifndef __KERNEL__
#include "jfs_compat.h"
-#define JFS_DEBUG
+#define JBD2_DEBUG
#define jfs_debug jbd_debug
#else
@@ -57,11 +57,11 @@
* CONFIG_JBD_DEBUG is on.
*/
#define JBD_EXPENSIVE_CHECKING
-extern int journal_enable_debug;
+extern int jbd2_journal_enable_debug;
#define jbd_debug(n, f, a...) \
do { \
- if ((n) <= journal_enable_debug) { \
+ if ((n) <= jbd2_journal_enable_debug) { \
printk (KERN_DEBUG "(%s, %d): %s: ", \
__FILE__, __LINE__, __FUNCTION__); \
printk (f, ## a); \
@@ -71,16 +71,16 @@ extern int journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif
-extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd_slab_alloc(size_t size, gfp_t flags);
-extern void jbd_slab_free(void *ptr, size_t size);
+extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
+extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
+extern void jbd2_slab_free(void *ptr, size_t size);
#define jbd_kmalloc(size, flags) \
- __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
+ __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \
- __jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
+ __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
-#define JFS_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_MIN_JOURNAL_BLOCKS 1024
#ifdef __KERNEL__
@@ -122,7 +122,7 @@ typedef struct journal_s journal_t; /* Journal control structure */
* Internal structures used by the logging mechanism:
*/
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
+#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
/*
* On-disk structures
@@ -132,11 +132,11 @@ typedef struct journal_s journal_t; /* Journal control structure */
* Descriptor block types:
*/
-#define JFS_DESCRIPTOR_BLOCK 1
-#define JFS_COMMIT_BLOCK 2
-#define JFS_SUPERBLOCK_V1 3
-#define JFS_SUPERBLOCK_V2 4
-#define JFS_REVOKE_BLOCK 5
+#define JBD2_DESCRIPTOR_BLOCK 1
+#define JBD2_COMMIT_BLOCK 2
+#define JBD2_SUPERBLOCK_V1 3
+#define JBD2_SUPERBLOCK_V2 4
+#define JBD2_REVOKE_BLOCK 5
/*
* Standard header for all descriptor blocks:
@@ -162,18 +162,18 @@ typedef struct journal_block_tag_s
* The revoke descriptor: used on disk to describe a series of blocks to
* be revoked from the log
*/
-typedef struct journal_revoke_header_s
+typedef struct jbd2_journal_revoke_header_s
{
journal_header_t r_header;
__be32 r_count; /* Count of bytes used in the block */
-} journal_revoke_header_t;
+} jbd2_journal_revoke_header_t;
/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
-#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
+#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
+#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */
+#define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */
+#define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
/*
@@ -196,7 +196,7 @@ typedef struct journal_superblock_s
__be32 s_start; /* blocknr of start of log */
/* 0x0020 */
- /* Error value, as set by journal_abort(). */
+ /* Error value, as set by jbd2_journal_abort(). */
__be32 s_errno;
/* 0x0024 */
@@ -224,22 +224,22 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
-#define JFS_HAS_COMPAT_FEATURE(j,mask) \
+#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
+#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
+#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
+#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES 0
-#define JFS_KNOWN_ROCOMPAT_FEATURES 0
-#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
+#define JBD2_KNOWN_COMPAT_FEATURES 0
+#define JBD2_KNOWN_ROCOMPAT_FEATURES 0
+#define JBD2_KNOWN_INCOMPAT_FEATURES JBD2_FEATURE_INCOMPAT_REVOKE
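
(Aside, not part of the patch: a minimal sketch of how these renamed macros are consumed, e.g. to test whether the journal superblock advertises revoke support; the helper name is hypothetical.)

/* Illustrative only: feature check against the on-disk superblock. */
static inline int example_journal_has_revoke(journal_t *journal)
{
	return JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_REVOKE);
}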
#ifdef __KERNEL__
@@ -359,7 +359,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
bit_spin_unlock(BH_JournalHead, &bh->b_state);
}
-struct jbd_revoke_table_s;
+struct jbd2_revoke_table_s;
/**
* struct handle_s - The handle_s type is the concrete type associated with
@@ -445,7 +445,7 @@ struct transaction_s
/*
* Transaction's current state
- * [no locking - only kjournald alters this]
+ * [no locking - only kjournald2 alters this]
* FIXME: needs barriers
* KLUDGE: [use j_state_lock]
*/
@@ -621,7 +621,7 @@ struct transaction_s
* @j_revoke: The revoke table - maintains the list of revoked blocks in the
* current transaction.
* @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
+ * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
* @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
* number that will fit in j_blocksize
* @j_last_sync_writer: most recent pid which did a synchronous write
@@ -805,11 +805,11 @@ struct journal_s
* current transaction. [j_revoke_lock]
*/
spinlock_t j_revoke_lock;
- struct jbd_revoke_table_s *j_revoke;
- struct jbd_revoke_table_s *j_revoke_table[2];
+ struct jbd2_revoke_table_s *j_revoke;
+ struct jbd2_revoke_table_s *j_revoke_table[2];
/*
- * array of bhs for journal_commit_transaction
+ * array of bhs for jbd2_journal_commit_transaction
*/
struct buffer_head **j_wbuf;
int j_wbufsize;
@@ -826,12 +826,12 @@ struct journal_s
/*
* Journal flag definitions
*/
-#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
-#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
-#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
-#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
-#define JFS_BARRIER 0x020 /* Use IDE barriers */
+#define JBD2_UNMOUNT 0x001 /* Journal thread is being destroyed */
+#define JBD2_ABORT 0x002 /* Journaling has been aborted for errors. */
+#define JBD2_ACK_ERR 0x004 /* The errno in the sb has been acked */
+#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
+#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
+#define JBD2_BARRIER 0x020 /* Use IDE barriers */
/*
* Function declarations for the journaling transaction and buffer
@@ -839,31 +839,31 @@ struct journal_s
*/
/* Filing buffers */
-extern void __journal_temp_unlink_buffer(struct journal_head *jh);
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_unfile_buffer(struct journal_head *);
+extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_clean_data_list(transaction_t *transaction);
/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned long *);
+extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
+int jbd2_journal_next_log_block(journal_t *, unsigned long *);
/* Commit management */
-extern void journal_commit_transaction(journal_t *);
+extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
extern int
-journal_write_metadata_buffer(transaction_t *transaction,
+jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in,
struct journal_head **jh_out,
unsigned long blocknr);
@@ -893,91 +893,91 @@ static inline handle_t *journal_current_handle(void)
* Register buffer modifications against the current transaction.
*/
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int journal_restart (handle_t *, int nblocks);
-extern int journal_extend (handle_t *, int nblocks);
-extern int journal_get_write_access(handle_t *, struct buffer_head *);
-extern int journal_get_create_access (handle_t *, struct buffer_head *);
-extern int journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int journal_dirty_data (handle_t *, struct buffer_head *);
-extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void journal_release_buffer (handle_t *, struct buffer_head *);
-extern int journal_forget (handle_t *, struct buffer_head *);
+extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
+extern int jbd2_journal_restart (handle_t *, int nblocks);
+extern int jbd2_journal_extend (handle_t *, int nblocks);
+extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
+extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
+extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
+extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
+extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
+extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
+extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
-extern void journal_invalidatepage(journal_t *,
+extern void jbd2_journal_invalidatepage(journal_t *,
struct page *, unsigned long);
-extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int journal_stop(handle_t *);
-extern int journal_flush (journal_t *);
-extern void journal_lock_updates (journal_t *);
-extern void journal_unlock_updates (journal_t *);
+extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int jbd2_journal_stop(handle_t *);
+extern int jbd2_journal_flush (journal_t *);
+extern void jbd2_journal_lock_updates (journal_t *);
+extern void jbd2_journal_unlock_updates (journal_t *);
-extern journal_t * journal_init_dev(struct block_device *bdev,
+extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int journal_update_format (journal_t *);
-extern int journal_check_used_features
+extern journal_t * jbd2_journal_init_inode (struct inode *);
+extern int jbd2_journal_update_format (journal_t *);
+extern int jbd2_journal_check_used_features
(journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_check_available_features
+extern int jbd2_journal_check_available_features
(journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_set_features
+extern int jbd2_journal_set_features
(journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_create (journal_t *);
-extern int journal_load (journal_t *journal);
-extern void journal_destroy (journal_t *);
-extern int journal_recover (journal_t *journal);
-extern int journal_wipe (journal_t *, int);
-extern int journal_skip_recovery (journal_t *);
-extern void journal_update_superblock (journal_t *, int);
-extern void __journal_abort_hard (journal_t *);
-extern void journal_abort (journal_t *, int);
-extern int journal_errno (journal_t *);
-extern void journal_ack_err (journal_t *);
-extern int journal_clear_err (journal_t *);
-extern int journal_bmap(journal_t *, unsigned long, unsigned long *);
-extern int journal_force_commit(journal_t *);
+extern int jbd2_journal_create (journal_t *);
+extern int jbd2_journal_load (journal_t *journal);
+extern void jbd2_journal_destroy (journal_t *);
+extern int jbd2_journal_recover (journal_t *journal);
+extern int jbd2_journal_wipe (journal_t *, int);
+extern int jbd2_journal_skip_recovery (journal_t *);
+extern void jbd2_journal_update_superblock (journal_t *, int);
+extern void __jbd2_journal_abort_hard (journal_t *);
+extern void jbd2_journal_abort (journal_t *, int);
+extern int jbd2_journal_errno (journal_t *);
+extern void jbd2_journal_ack_err (journal_t *);
+extern int jbd2_journal_clear_err (journal_t *);
+extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long *);
+extern int jbd2_journal_force_commit(journal_t *);
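
(Aside, not part of the patch: taken together, the renamed transaction API is used in the familiar start/access/dirty/stop sequence. A minimal, hypothetical sketch follows.)

/* Illustrative only: one metadata update inside a single handle. */
static int example_modify_metadata(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle = jbd2_journal_start(journal, 1);
	int err, err2;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify the buffer contents here ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}
	err2 = jbd2_journal_stop(handle);
	return err ? err : err2;
}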
/*
* journal_head management
*/
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_remove_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
+void jbd2_journal_remove_journal_head(struct buffer_head *bh);
+void jbd2_journal_put_journal_head(struct journal_head *jh);
/*
* handle management
*/
-extern kmem_cache_t *jbd_handle_cache;
+extern kmem_cache_t *jbd2_handle_cache;
static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
{
- return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
+ return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
}
static inline void jbd_free_handle(handle_t *handle)
{
- kmem_cache_free(jbd_handle_cache, handle);
+ kmem_cache_free(jbd2_handle_cache, handle);
}
/* Primary revoke support */
#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int journal_init_revoke(journal_t *, int);
-extern void journal_destroy_revoke_caches(void);
-extern int journal_init_revoke_caches(void);
+extern int jbd2_journal_init_revoke(journal_t *, int);
+extern void jbd2_journal_destroy_revoke_caches(void);
+extern int jbd2_journal_init_revoke_caches(void);
-extern void journal_destroy_revoke(journal_t *);
-extern int journal_revoke (handle_t *,
+extern void jbd2_journal_destroy_revoke(journal_t *);
+extern int jbd2_journal_revoke (handle_t *,
unsigned long, struct buffer_head *);
-extern int journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void journal_write_revoke_records(journal_t *, transaction_t *);
+extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
/* Recovery revoke support */
-extern int journal_set_revoke(journal_t *, unsigned long, tid_t);
-extern int journal_test_revoke(journal_t *, unsigned long, tid_t);
-extern void journal_clear_revoke(journal_t *);
-extern void journal_switch_revoke_table(journal_t *journal);
+extern int jbd2_journal_set_revoke(journal_t *, unsigned long, tid_t);
+extern int jbd2_journal_test_revoke(journal_t *, unsigned long, tid_t);
+extern void jbd2_journal_clear_revoke(journal_t *);
+extern void jbd2_journal_switch_revoke_table(journal_t *journal);
/*
* The log thread user interface:
@@ -986,17 +986,17 @@ extern void journal_switch_revoke_table(journal_t *journal);
* transitions on demand.
*/
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
+int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
+int jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+int jbd2_journal_force_commit_nested(journal_t *journal);
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_log_do_checkpoint(journal_t *journal);
-void __log_wait_for_space(journal_t *journal);
-extern void __journal_drop_transaction(journal_t *, transaction_t *);
-extern int cleanup_journal_tail(journal_t *);
+void __jbd2_log_wait_for_space(journal_t *journal);
+extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int jbd2_cleanup_journal_tail(journal_t *);
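
(Aside, not part of the patch: a minimal sketch of the classic fsync-style use of this interface, assuming jbd2_journal_start_commit returns nonzero and fills in the tid when there is a transaction to wait for; the helper name is hypothetical.)

/* Illustrative only: start a commit if one is needed and wait for it. */
static int example_commit_and_wait(journal_t *journal)
{
	tid_t tid;

	if (jbd2_journal_start_commit(journal, &tid))
		return jbd2_log_wait_commit(journal, tid);
	return 0;
}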
/* Debugging code only: */
@@ -1010,7 +1010,7 @@ do { \
/*
 * is_journal_aborted
*
- * Simple test wrapper function to test the JFS_ABORT state flag. This
+ * Simple test wrapper function to test the JBD2_ABORT state flag. This
* bit, when set, indicates that we have had a fatal error somewhere,
* either inside the journaling layer or indicated to us by the client
 * (e.g. ext3), and that we should not commit any further
@@ -1019,7 +1019,7 @@ do { \
static inline int is_journal_aborted(journal_t *journal)
{
- return journal->j_flags & JFS_ABORT;
+ return journal->j_flags & JBD2_ABORT;
}
static inline int is_handle_aborted(handle_t *handle)
@@ -1029,7 +1029,7 @@ static inline int is_handle_aborted(handle_t *handle)
return is_journal_aborted(handle->h_transaction->t_journal);
}
-static inline void journal_abort_handle(handle_t *handle)
+static inline void jbd2_journal_abort_handle(handle_t *handle)
{
handle->h_aborted = 1;
}
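
(Aside, not part of the patch: a client-side guard built on these helpers; the -EROFS return mirrors the usual filesystem convention and the function is hypothetical.)

/* Illustrative only: refuse further updates once the handle is aborted. */
static int example_guarded_update(handle_t *handle, struct buffer_head *bh)
{
	if (is_handle_aborted(handle))
		return -EROFS;
	return jbd2_journal_dirty_metadata(handle, bh);
}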
@@ -1051,7 +1051,7 @@ static inline int tid_geq(tid_t x, tid_t y)
return (difference >= 0);
}
-extern int journal_blocks_per_page(struct inode *inode);
+extern int jbd2_journal_blocks_per_page(struct inode *inode);
/*
* Return the minimum number of blocks which must be free in the journal