Diffstat (limited to 'fs/reiserfs/journal.c')
-rw-r--r--  fs/reiserfs/journal.c | 67
1 file changed, 41 insertions(+), 26 deletions(-)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 072156c4f895..9f62da7471c9 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -32,7 +32,7 @@
* to disk for all backgrounded commits that have been
* around too long.
* -- Note, if you call this as an immediate flush from
- * from within kupdate, it will ignore the immediate flag
+ * within kupdate, it will ignore the immediate flag
*/
#include <linux/time.h>
@@ -461,7 +461,6 @@ int reiserfs_in_journal(struct super_block *sb,
b_blocknr_t * next_zero_bit)
{
struct reiserfs_journal *journal = SB_JOURNAL(sb);
- struct reiserfs_journal_cnode *cn;
struct reiserfs_list_bitmap *jb;
int i;
unsigned long bl;
@@ -497,13 +496,12 @@ int reiserfs_in_journal(struct super_block *sb,
bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
/* is it in any old transactions? */
if (search_all
- && (cn =
- get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
+ && (get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
return 1;
}
/* is it in the current transaction. This should never happen */
- if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
+ if ((get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
BUG();
return 1;
}
@@ -603,14 +601,14 @@ static int journal_list_still_alive(struct super_block *s,
*/
static void release_buffer_page(struct buffer_head *bh)
{
- struct page *page = bh->b_page;
- if (!page->mapping && trylock_page(page)) {
- get_page(page);
+ struct folio *folio = page_folio(bh->b_page);
+ if (!folio->mapping && folio_trylock(folio)) {
+ folio_get(folio);
put_bh(bh);
- if (!page->mapping)
- try_to_free_buffers(page);
- unlock_page(page);
- put_page(page);
+ if (!folio->mapping)
+ try_to_free_buffers(folio);
+ folio_unlock(folio);
+ folio_put(folio);
} else {
put_bh(bh);
}
@@ -652,7 +650,7 @@ static void submit_logged_buffer(struct buffer_head *bh)
BUG();
if (!buffer_uptodate(bh))
BUG();
- submit_bh(REQ_OP_WRITE, 0, bh);
+ submit_bh(REQ_OP_WRITE, bh);
}
static void submit_ordered_buffer(struct buffer_head *bh)
@@ -662,7 +660,7 @@ static void submit_ordered_buffer(struct buffer_head *bh)
clear_buffer_dirty(bh);
if (!buffer_uptodate(bh))
BUG();
- submit_bh(REQ_OP_WRITE, 0, bh);
+ submit_bh(REQ_OP_WRITE, bh);
}
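
Both submit paths above also pick up the new submit_bh() signature: the separate op_flags argument is gone and any flags are OR-ed into the single blk_opf_t operation word. Hedged sketch of the calling convention (the REQ_SYNC variant is illustrative only, not something this patch adds):

	submit_bh(REQ_OP_WRITE, bh);		/* plain async write of a locked, uptodate buffer */
	submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);	/* same, marked synchronous for the I/O scheduler */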
#define CHUNK_SIZE 32
@@ -860,8 +858,8 @@ loop_next:
ret = -EIO;
}
/*
- * ugly interaction with invalidatepage here.
- * reiserfs_invalidate_page will pin any buffer that has a
+ * ugly interaction with invalidate_folio here.
+ * reiserfs_invalidate_folio will pin any buffer that has a
* valid journal head from an older transaction. If someone
* else sets our buffer dirty after we write it in the first
* loop, and then someone truncates the page away, nobody
@@ -870,7 +868,7 @@ loop_next:
*/
if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
spin_unlock(lock);
- ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+ write_dirty_buffer(bh, 0);
spin_lock(lock);
}
put_bh(bh);
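
Here and in flush_commit_list() below, ll_rw_block() is replaced by write_dirty_buffer(). ll_rw_block() only trylocked buffers and silently skipped any it could not lock, which is why it was removed; write_dirty_buffer() locks the buffer and either clears the dirty bit and submits the write, or simply unlocks if the buffer is no longer dirty. Minimal sketch, assuming an already-referenced bh:

	if (buffer_dirty(bh))
		write_dirty_buffer(bh, 0);	/* 0 = no extra request flags such as REQ_SYNC */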
@@ -953,7 +951,9 @@ static int reiserfs_async_progress_wait(struct super_block *s)
int depth;
depth = reiserfs_write_unlock_nested(s);
- congestion_wait(BLK_RW_ASYNC, HZ / 10);
+ wait_var_event_timeout(&j->j_async_throttle,
+ atomic_read(&j->j_async_throttle) == 0,
+ HZ / 10);
reiserfs_write_lock_nested(s, depth);
}
@@ -1054,13 +1054,14 @@ static int flush_commit_list(struct super_block *s,
if (tbh) {
if (buffer_dirty(tbh)) {
depth = reiserfs_write_unlock_nested(s);
- ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
+ write_dirty_buffer(tbh, 0);
reiserfs_write_lock_nested(s, depth);
}
put_bh(tbh) ;
}
}
- atomic_dec(&journal->j_async_throttle);
+ if (atomic_dec_and_test(&journal->j_async_throttle))
+ wake_up_var(&journal->j_async_throttle);
for (i = 0; i < (jl->j_len + 1); i++) {
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
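
congestion_wait() no longer exists, so reiserfs_async_progress_wait() now waits directly on the journal's own throttle counter: flush_commit_list() drops j_async_throttle with atomic_dec_and_test() and wakes waiters via wake_up_var(), while the waiter sleeps in wait_var_event_timeout() until the counter drains or HZ/10 elapses. A condensed sketch of the two halves, using a hypothetical counter nr_inflight in place of j_async_throttle (the var-wait helpers live in <linux/wait_bit.h>):

	static atomic_t nr_inflight = ATOMIC_INIT(0);	/* stand-in for journal->j_async_throttle */

	/* submitter: take a reference before issuing I/O ... */
	atomic_inc(&nr_inflight);
	/* ... and on completion wake waiters when the last reference drops. */
	if (atomic_dec_and_test(&nr_inflight))
		wake_up_var(&nr_inflight);

	/* waiter: sleep until all in-flight commits have drained, or time out. */
	wait_var_event_timeout(&nr_inflight,
			       atomic_read(&nr_inflight) == 0, HZ / 10);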
@@ -2239,7 +2240,7 @@ abort_replay:
}
}
/* read in the log blocks, memcpy to the corresponding real block */
- ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
+ bh_read_batch(get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
wait_on_buffer(log_blocks[i]);
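
The replay path reads all log blocks in one go: bh_read_batch() submits a read for every buffer in the array that is not already uptodate, locking each buffer rather than skipping locked ones, and the existing wait_on_buffer() loop then waits for completion as before. Sketch of the pattern, with nr and bhs standing in for get_desc_trans_len(desc) and log_blocks:

	bh_read_batch(nr, bhs);			/* submit reads; uptodate buffers are left alone */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			goto io_error;		/* hypothetical error label */
	}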
@@ -2341,10 +2342,11 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
} else
bhlist[j++] = bh;
}
- ll_rw_block(REQ_OP_READ, 0, j, bhlist);
+ bh = bhlist[0];
+ bh_read_nowait(bh, 0);
+ bh_readahead_batch(j - 1, &bhlist[1], 0);
for (i = 1; i < j; i++)
brelse(bhlist[i]);
- bh = bhlist[0];
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
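
reiserfs_breada() needs the first buffer read for real and the trailing ones only as readahead, so the single ll_rw_block() call is split in two: bh_read_nowait() starts the read of bhlist[0], the buffer that is subsequently waited on and returned, while bh_readahead_batch() opportunistically submits the rest (it adds REQ_RAHEAD and skips buffers it cannot trylock). Note that bh = bhlist[0] is now taken before the brelse() loop instead of after it. Hedged sketch of the idiom with generic names n and bhs:

	bh_read_nowait(bhs[0], 0);		/* the read we will actually wait for */
	bh_readahead_batch(n - 1, &bhs[1], 0);	/* best-effort readahead of the rest */
	for (i = 1; i < n; i++)
		brelse(bhs[i]);
	wait_on_buffer(bhs[0]);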
@@ -2599,7 +2601,6 @@ static int journal_init_dev(struct super_block *super,
int result;
dev_t jdev;
fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
- char b[BDEVNAME_SIZE];
result = 0;
@@ -2621,8 +2622,8 @@ static int journal_init_dev(struct super_block *super,
result = PTR_ERR(journal->j_dev_bd);
journal->j_dev_bd = NULL;
reiserfs_warning(super, "sh-458",
- "cannot init journal device '%s': %i",
- __bdevname(jdev, b), result);
+ "cannot init journal device unknown-block(%u,%u): %i",
+ MAJOR(jdev), MINOR(jdev), result);
return result;
} else if (jdev != super->s_dev)
set_blocksize(journal->j_dev_bd, super->s_blocksize);
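
__bdevname() has been removed, and because the journal block device could not be opened here there is no struct block_device to print with %pg, so the warning decodes the dev_t by hand; the format mirrors __bdevname()'s old "unknown-block(major,minor)" output. Sketch of the same formatting (pr_warn() is used only to keep the example free of reiserfs helpers):

	/* MAJOR()/MINOR() come from <linux/kdev_t.h>. */
	pr_warn("cannot init journal device unknown-block(%u,%u): %i\n",
		MAJOR(jdev), MINOR(jdev), result);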
@@ -2761,6 +2762,20 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
+ /*
+ * Sanity check to see if journal first block is correct.
+ * If journal first block is invalid it can cause
+ * zeroing important superblock members.
+ */
+ if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
+ SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) {
+ reiserfs_warning(sb, "journal-1393",
+ "journal 1st super block is invalid: 1st reserved block %d, but actual 1st block is %d",
+ SB_JOURNAL_1st_RESERVED_BLOCK(sb),
+ SB_ONDISK_JOURNAL_1st_BLOCK(sb));
+ goto free_and_return;
+ }
+
if (journal_init_dev(sb, journal, j_dev_name) != 0) {
reiserfs_warning(sb, "sh-462",
"unable to initialize journal device");