From 45b03d5e8e674eb6555b767e1c8eb40b671ff892 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:21 -0400 Subject: reiserfs: rework reiserfs_warning ReiserFS warnings can be somewhat inconsistent. In some cases: * a unique identifier may be associated with it * the function name may be included * the device may be printed separately This patch aims to make warnings more consistent. reiserfs_warning() prints the device name, so printing it a second time is not required. The function name for a warning is always helpful in debugging, so it is now automatically inserted into the output. Hans has stated that every warning should have a unique identifier. Some cases lack them, others really shouldn't have them. reiserfs_warning() now expects an id associated with each message. In the rare case where one isn't needed, "" will suffice. Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/fix_node.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 07d05e0842b7..59735a9e2349 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -496,8 +496,8 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, snum012[needed_nodes - 1 + 3] = units; if (needed_nodes > 2) - reiserfs_warning(tb->tb_sb, "vs-8111: get_num_ver: " - "split_item_position is out of boundary"); + reiserfs_warning(tb->tb_sb, "vs-8111", + "split_item_position is out of range"); snum012[needed_nodes - 1]++; split_item_positions[needed_nodes - 1] = i; needed_nodes++; @@ -533,8 +533,8 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY && vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT) - reiserfs_warning(tb->tb_sb, "vs-8115: get_num_ver: not " - "directory or indirect item"); + reiserfs_warning(tb->tb_sb, "vs-8115", + "not directory or indirect item"); } /* now we know S2bytes, calculate S1bytes */ @@ -2268,9 +2268,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) #ifdef CONFIG_REISERFS_CHECK repeat_counter++; if ((repeat_counter % 10000) == 0) { - reiserfs_warning(p_s_tb->tb_sb, - "wait_tb_buffers_until_released(): too many " - "iterations waiting for buffer to unlock " + reiserfs_warning(p_s_tb->tb_sb, "reiserfs-8200", + "too many iterations waiting " + "for buffer to unlock " "(%b)", locked); /* Don't loop forever. Try to recover from possible error. */ -- cgit v1.2.3-59-g8ed1b From c3a9c2109f84882b9b3178f6b1838d550d3df0ec Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:25 -0400 Subject: reiserfs: rework reiserfs_panic ReiserFS panics can be somewhat inconsistent. In some cases: * a unique identifier may be associated with it * the function name may be included * the device may be printed separately This patch aims to make panics more consistent. reiserfs_panic() prints the device name, so printing it a second time is not required. The function name for a panic message is always helpful in debugging, so it is now automatically inserted into the output. Hans has stated that every panic should have a unique identifier. Some cases lack them, others really shouldn't have them. reiserfs_panic() now expects an id associated with each message. In the rare case where one isn't needed, "" will suffice.
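To make the new calling convention concrete, here is a minimal, self-contained sketch of the pattern both of these patches describe. It is not the kernel code: __example_panic(), example_panic(), demo_caller() and the "sda1" device string are illustrative stand-ins for __reiserfs_panic()/reiserfs_panic() and the error_buf handling shown in the include/linux/reiserfs_fs.h and fs/reiserfs/prints.c hunks later in the series.

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for __reiserfs_panic(): the caller never passes the function
 * name -- the wrapper macro below inserts __func__ -- and the id
 * ("vs-8111", "journal-1577", ...) travels as a separate argument instead
 * of being embedded in the format string. */
static void __example_panic(const char *dev, const char *id,
			    const char *function, const char *fmt, ...)
{
	va_list args;

	printf("REISERFS panic (device %s): %s%s%s: ",
	       dev, id ? id : "", id ? " " : "", function);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
	printf("\n");
}

/* Call sites supply only device, id and message; __func__ is added here. */
#define example_panic(dev, id, fmt, args...) \
	__example_panic(dev, id, __func__, fmt, ##args)

static void demo_caller(void)
{
	/* Prints: REISERFS panic (device sda1): vs-8111 demo_caller: ... */
	example_panic("sda1", "vs-8111", "split_item_position is out of range");
}

int main(void)
{
	demo_caller();
	return 0;
}

The real macros defer to __reiserfs_warning()/__reiserfs_panic(), which format into error_buf and then printk or panic; the point of the sketch is only that the macro supplies __func__ and keeps the id separate, so call sites no longer repeat the function name by hand.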
Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/do_balan.c | 67 ++++++++++++++++++++++-------------------- fs/reiserfs/fix_node.c | 68 ++++++++++++++++++++++--------------------- fs/reiserfs/ibalance.c | 12 ++++---- fs/reiserfs/inode.c | 3 +- fs/reiserfs/item_ops.c | 8 +++-- fs/reiserfs/journal.c | 57 ++++++++++++++++++------------------ fs/reiserfs/lbalance.c | 27 +++++++++-------- fs/reiserfs/namei.c | 18 +++++------- fs/reiserfs/objectid.c | 3 +- fs/reiserfs/prints.c | 33 ++++++++++----------- fs/reiserfs/stree.c | 49 +++++++++++++++---------------- fs/reiserfs/tail_conversion.c | 10 +++---- include/linux/reiserfs_fs.h | 28 ++++++++++++++---- 13 files changed, 200 insertions(+), 183 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index f701f37ddf98..e788fbc3ff6b 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -153,8 +153,8 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag) default: print_cur_tb("12040"); - reiserfs_panic(tb->tb_sb, - "PAP-12040: balance_leaf_when_delete: unexpectable mode: %s(%d)", + reiserfs_panic(tb->tb_sb, "PAP-12040", + "unexpected mode: %s(%d)", (flag == M_PASTE) ? "PASTE" : ((flag == M_INSERT) ? "INSERT" : @@ -721,8 +721,9 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h } break; default: /* cases d and t */ - reiserfs_panic(tb->tb_sb, - "PAP-12130: balance_leaf: lnum > 0: unexpectable mode: %s(%d)", + reiserfs_panic(tb->tb_sb, "PAP-12130", + "lnum > 0: unexpected mode: " + " %s(%d)", (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) @@ -1134,8 +1135,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h } break; default: /* cases d and t */ - reiserfs_panic(tb->tb_sb, - "PAP-12175: balance_leaf: rnum > 0: unexpectable mode: %s(%d)", + reiserfs_panic(tb->tb_sb, "PAP-12175", + "rnum > 0: unexpected mode: %s(%d)", (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" @@ -1165,8 +1166,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h not set correctly */ if (tb->CFL[0]) { if (!tb->CFR[0]) - reiserfs_panic(tb->tb_sb, - "vs-12195: balance_leaf: CFR not initialized"); + reiserfs_panic(tb->tb_sb, "vs-12195", + "CFR not initialized"); copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])); do_balance_mark_internal_dirty(tb, tb->CFL[0], 0); @@ -1472,7 +1473,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h && (pos_in_item != ih_item_len(ih_check) || tb->insert_size[0] <= 0)) reiserfs_panic(tb->tb_sb, - "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len"); + "PAP-12235", + "pos_in_item " + "must be equal " + "to ih_item_len"); #endif /* CONFIG_REISERFS_CHECK */ leaf_mi = @@ -1532,8 +1536,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h } break; default: /* cases d and t */ - reiserfs_panic(tb->tb_sb, - "PAP-12245: balance_leaf: blknum > 2: unexpectable mode: %s(%d)", + reiserfs_panic(tb->tb_sb, "PAP-12245", + "blknum > 2: unexpected mode: %s(%d)", (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? 
"CUT" @@ -1678,10 +1682,11 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h print_cur_tb("12285"); reiserfs_panic(tb-> tb_sb, - "PAP-12285: balance_leaf: insert_size must be 0 (%d)", - tb-> - insert_size - [0]); + "PAP-12285", + "insert_size " + "must be 0 " + "(%d)", + tb->insert_size[0]); } } #endif /* CONFIG_REISERFS_CHECK */ @@ -1694,11 +1699,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h if (flag == M_PASTE && tb->insert_size[0]) { print_cur_tb("12290"); reiserfs_panic(tb->tb_sb, - "PAP-12290: balance_leaf: insert_size is still not 0 (%d)", + "PAP-12290", "insert_size is still not 0 (%d)", tb->insert_size[0]); } #endif /* CONFIG_REISERFS_CHECK */ - return 0; } /* Leaf level of the tree is balanced (end of balance_leaf) */ @@ -1729,8 +1733,7 @@ struct buffer_head *get_FEB(struct tree_balance *tb) break; if (i == MAX_FEB_SIZE) - reiserfs_panic(tb->tb_sb, - "vs-12300: get_FEB: FEB list is empty"); + reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty"); bi.tb = tb; bi.bi_bh = first_b = tb->FEB[i]; @@ -1871,8 +1874,8 @@ static void check_internal_node(struct super_block *s, struct buffer_head *bh, for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) { if (!is_reusable(s, dc_block_number(dc), 1)) { print_cur_tb(mes); - reiserfs_panic(s, - "PAP-12338: check_internal_node: invalid child pointer %y in %b", + reiserfs_panic(s, "PAP-12338", + "invalid child pointer %y in %b", dc, bh); } } @@ -1894,9 +1897,10 @@ static int check_before_balancing(struct tree_balance *tb) int retval = 0; if (cur_tb) { - reiserfs_panic(tb->tb_sb, "vs-12335: check_before_balancing: " - "suspect that schedule occurred based on cur_tb not being null at this point in code. " - "do_balance cannot properly handle schedule occurring while it runs."); + reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule " + "occurred based on cur_tb not being null at " + "this point in code. do_balance cannot properly " + "handle schedule occurring while it runs."); } /* double check that buffers that we will modify are unlocked. 
(fix_nodes should already have @@ -1928,8 +1932,8 @@ static void check_after_balance_leaf(struct tree_balance *tb) dc_size(B_N_CHILD (tb->FL[0], get_left_neighbor_position(tb, 0)))) { print_cur_tb("12221"); - reiserfs_panic(tb->tb_sb, - "PAP-12355: check_after_balance_leaf: shift to left was incorrect"); + reiserfs_panic(tb->tb_sb, "PAP-12355", + "shift to left was incorrect"); } } if (tb->rnum[0]) { @@ -1938,8 +1942,8 @@ static void check_after_balance_leaf(struct tree_balance *tb) dc_size(B_N_CHILD (tb->FR[0], get_right_neighbor_position(tb, 0)))) { print_cur_tb("12222"); - reiserfs_panic(tb->tb_sb, - "PAP-12360: check_after_balance_leaf: shift to right was incorrect"); + reiserfs_panic(tb->tb_sb, "PAP-12360", + "shift to right was incorrect"); } } if (PATH_H_PBUFFER(tb->tb_path, 1) && @@ -1964,8 +1968,7 @@ static void check_after_balance_leaf(struct tree_balance *tb) (PATH_H_PBUFFER(tb->tb_path, 1), PATH_H_POSITION(tb->tb_path, 1))), right); - reiserfs_panic(tb->tb_sb, - "PAP-12365: check_after_balance_leaf: S is incorrect"); + reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect"); } } @@ -2100,8 +2103,8 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */ tb->need_balance_dirty = 0; if (FILESYSTEM_CHANGED_TB(tb)) { - reiserfs_panic(tb->tb_sb, - "clm-6000: do_balance, fs generation has changed\n"); + reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has " + "changed"); } /* if we have no real work to do */ if (!tb->insert_size[0]) { diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 59735a9e2349..bbb37b0589af 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -135,8 +135,7 @@ static void create_virtual_node(struct tree_balance *tb, int h) vn->vn_free_ptr += op_create_vi(vn, vi, is_affected, tb->insert_size[0]); if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr) - reiserfs_panic(tb->tb_sb, - "vs-8030: create_virtual_node: " + reiserfs_panic(tb->tb_sb, "vs-8030", "virtual node space consumed"); if (!is_affected) @@ -186,8 +185,9 @@ static void create_virtual_node(struct tree_balance *tb, int h) && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) { /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */ print_block(Sh, 0, -1, -1); - reiserfs_panic(tb->tb_sb, - "vs-8045: create_virtual_node: rdkey %k, affected item==%d (mode==%c) Must be %c", + reiserfs_panic(tb->tb_sb, "vs-8045", + "rdkey %k, affected item==%d " + "(mode==%c) Must be %c", key, vn->vn_affected_item_num, vn->vn_mode, M_DELETE); } @@ -1255,8 +1255,8 @@ static int ip_check_balance(struct tree_balance *tb, int h) /* Calculate balance parameters for creating new root. 
*/ if (!Sh) { if (!h) - reiserfs_panic(tb->tb_sb, - "vs-8210: ip_check_balance: S[0] can not be 0"); + reiserfs_panic(tb->tb_sb, "vs-8210", + "S[0] can not be 0"); switch (n_ret_value = get_empty_nodes(tb, h)) { case CARRY_ON: set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); @@ -1266,8 +1266,8 @@ static int ip_check_balance(struct tree_balance *tb, int h) case REPEAT_SEARCH: return n_ret_value; default: - reiserfs_panic(tb->tb_sb, - "vs-8215: ip_check_balance: incorrect return value of get_empty_nodes"); + reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect " + "return value of get_empty_nodes"); } } @@ -2095,38 +2095,38 @@ static void tb_buffer_sanity_check(struct super_block *p_s_sb, if (p_s_bh) { if (atomic_read(&(p_s_bh->b_count)) <= 0) { - reiserfs_panic(p_s_sb, - "jmacd-1: tb_buffer_sanity_check(): negative or zero reference counter for buffer %s[%d] (%b)\n", - descr, level, p_s_bh); + reiserfs_panic(p_s_sb, "jmacd-1", "negative or zero " + "reference counter for buffer %s[%d] " + "(%b)", descr, level, p_s_bh); } if (!buffer_uptodate(p_s_bh)) { - reiserfs_panic(p_s_sb, - "jmacd-2: tb_buffer_sanity_check(): buffer is not up to date %s[%d] (%b)\n", + reiserfs_panic(p_s_sb, "jmacd-2", "buffer is not up " + "to date %s[%d] (%b)", descr, level, p_s_bh); } if (!B_IS_IN_TREE(p_s_bh)) { - reiserfs_panic(p_s_sb, - "jmacd-3: tb_buffer_sanity_check(): buffer is not in tree %s[%d] (%b)\n", + reiserfs_panic(p_s_sb, "jmacd-3", "buffer is not " + "in tree %s[%d] (%b)", descr, level, p_s_bh); } if (p_s_bh->b_bdev != p_s_sb->s_bdev) { - reiserfs_panic(p_s_sb, - "jmacd-4: tb_buffer_sanity_check(): buffer has wrong device %s[%d] (%b)\n", + reiserfs_panic(p_s_sb, "jmacd-4", "buffer has wrong " + "device %s[%d] (%b)", descr, level, p_s_bh); } if (p_s_bh->b_size != p_s_sb->s_blocksize) { - reiserfs_panic(p_s_sb, - "jmacd-5: tb_buffer_sanity_check(): buffer has wrong blocksize %s[%d] (%b)\n", + reiserfs_panic(p_s_sb, "jmacd-5", "buffer has wrong " + "blocksize %s[%d] (%b)", descr, level, p_s_bh); } if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) { - reiserfs_panic(p_s_sb, - "jmacd-6: tb_buffer_sanity_check(): buffer block number too high %s[%d] (%b)\n", + reiserfs_panic(p_s_sb, "jmacd-6", "buffer block " + "number too high %s[%d] (%b)", descr, level, p_s_bh); } } @@ -2358,14 +2358,14 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ #ifdef CONFIG_REISERFS_CHECK if (cur_tb) { print_cur_tb("fix_nodes"); - reiserfs_panic(p_s_tb->tb_sb, - "PAP-8305: fix_nodes: there is pending do_balance"); + reiserfs_panic(p_s_tb->tb_sb, "PAP-8305", + "there is pending do_balance"); } if (!buffer_uptodate(p_s_tbS0) || !B_IS_IN_TREE(p_s_tbS0)) { - reiserfs_panic(p_s_tb->tb_sb, - "PAP-8320: fix_nodes: S[0] (%b %z) is not uptodate " - "at the beginning of fix_nodes or not in tree (mode %c)", + reiserfs_panic(p_s_tb->tb_sb, "PAP-8320", "S[0] (%b %z) is " + "not uptodate at the beginning of fix_nodes " + "or not in tree (mode %c)", p_s_tbS0, p_s_tbS0, n_op_mode); } @@ -2373,24 +2373,26 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ switch (n_op_mode) { case M_INSERT: if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(p_s_tbS0)) - reiserfs_panic(p_s_tb->tb_sb, - "PAP-8330: fix_nodes: Incorrect item number %d (in S0 - %d) in case of insert", - n_item_num, B_NR_ITEMS(p_s_tbS0)); + reiserfs_panic(p_s_tb->tb_sb, "PAP-8330", "Incorrect " + "item number %d (in S0 - %d) in case " + "of insert", n_item_num, + B_NR_ITEMS(p_s_tbS0)); break; case M_PASTE: case M_DELETE: case M_CUT: 
if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(p_s_tbS0)) { print_block(p_s_tbS0, 0, -1, -1); - reiserfs_panic(p_s_tb->tb_sb, - "PAP-8335: fix_nodes: Incorrect item number(%d); mode = %c insert_size = %d\n", + reiserfs_panic(p_s_tb->tb_sb, "PAP-8335", "Incorrect " + "item number(%d); mode = %c " + "insert_size = %d", n_item_num, n_op_mode, p_s_tb->insert_size[0]); } break; default: - reiserfs_panic(p_s_tb->tb_sb, - "PAP-8340: fix_nodes: Incorrect mode of operation"); + reiserfs_panic(p_s_tb->tb_sb, "PAP-8340", "Incorrect mode " + "of operation"); } #endif diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c index de391a82b999..063b5514fe29 100644 --- a/fs/reiserfs/ibalance.c +++ b/fs/reiserfs/ibalance.c @@ -105,8 +105,8 @@ static void internal_define_dest_src_infos(int shift_mode, break; default: - reiserfs_panic(tb->tb_sb, - "internal_define_dest_src_infos: shift type is unknown (%d)", + reiserfs_panic(tb->tb_sb, "ibalance-1", + "shift type is unknown (%d)", shift_mode); } } @@ -702,8 +702,8 @@ static void balance_internal_when_delete(struct tree_balance *tb, return; } - reiserfs_panic(tb->tb_sb, - "balance_internal_when_delete: unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d", + reiserfs_panic(tb->tb_sb, "ibalance-2", + "unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d", h, tb->lnum[h], h, tb->rnum[h]); } @@ -940,8 +940,8 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure struct block_head *blkh; if (tb->blknum[h] != 1) - reiserfs_panic(NULL, - "balance_internal: One new node required for creating the new root"); + reiserfs_panic(NULL, "ibalance-3", "One new node " + "required for creating the new root"); /* S[h] = empty buffer from the list FEB. */ tbSh = get_FEB(tb); blkh = B_BLK_HEAD(tbSh); diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 95157762b1bf..7ee0097004c0 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1300,8 +1300,7 @@ static void update_stat_data(struct treepath *path, struct inode *inode, ih = PATH_PITEM_HEAD(path); if (!is_statdata_le_ih(ih)) - reiserfs_panic(inode->i_sb, - "vs-13065: update_stat_data: key %k, found item %h", + reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h", INODE_PKEY(inode), ih); if (stat_data_v1(ih)) { diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c index 8a11cf39f57b..72cb1cc51b87 100644 --- a/fs/reiserfs/item_ops.c +++ b/fs/reiserfs/item_ops.c @@ -517,8 +517,9 @@ static int direntry_create_vi(struct virtual_node *vn, ((is_affected && (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT)) ? 
insert_size : 0)) { - reiserfs_panic(NULL, - "vs-8025: set_entry_sizes: (mode==%c, insert_size==%d), invalid length of directory item", + reiserfs_panic(NULL, "vs-8025", "(mode==%c, " + "insert_size==%d), invalid length of " + "directory item", vn->vn_mode, insert_size); } } @@ -549,7 +550,8 @@ static int direntry_check_left(struct virtual_item *vi, int free, } if (entries == dir_u->entry_count) { - reiserfs_panic(NULL, "free space %d, entry_count %d\n", free, + reiserfs_panic(NULL, "item_ops-1", + "free space %d, entry_count %d", free, dir_u->entry_count); } diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 88a031fafd07..774f3ba37409 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -436,8 +436,8 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller) { #ifdef CONFIG_SMP if (current->lock_depth < 0) { - reiserfs_panic(sb, "%s called without kernel lock held", - caller); + reiserfs_panic(sb, "journal-1", "%s called without kernel " + "lock held", caller); } #else ; @@ -574,7 +574,7 @@ static inline void put_journal_list(struct super_block *s, struct reiserfs_journal_list *jl) { if (jl->j_refcount < 1) { - reiserfs_panic(s, "trans id %u, refcount at %d", + reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d", jl->j_trans_id, jl->j_refcount); } if (--jl->j_refcount == 0) @@ -1416,8 +1416,7 @@ static int flush_journal_list(struct super_block *s, count = 0; if (j_len_saved > journal->j_trans_max) { - reiserfs_panic(s, - "journal-715: flush_journal_list, length is %lu, trans id %lu\n", + reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu", j_len_saved, jl->j_trans_id); return 0; } @@ -1449,8 +1448,8 @@ static int flush_journal_list(struct super_block *s, ** or wait on a more recent transaction, or just ignore it */ if (atomic_read(&(journal->j_wcount)) != 0) { - reiserfs_panic(s, - "journal-844: panic journal list is flushing, wcount is not 0\n"); + reiserfs_panic(s, "journal-844", "journal list is flushing, " + "wcount is not 0"); } cn = jl->j_realblock; while (cn) { @@ -1551,13 +1550,13 @@ static int flush_journal_list(struct super_block *s, while (cn) { if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) { if (!cn->bh) { - reiserfs_panic(s, - "journal-1011: cn->bh is NULL\n"); + reiserfs_panic(s, "journal-1011", + "cn->bh is NULL"); } wait_on_buffer(cn->bh); if (!cn->bh) { - reiserfs_panic(s, - "journal-1012: cn->bh is NULL\n"); + reiserfs_panic(s, "journal-1012", + "cn->bh is NULL"); } if (unlikely(!buffer_uptodate(cn->bh))) { #ifdef CONFIG_REISERFS_CHECK @@ -3255,8 +3254,8 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, PROC_INFO_INC(p_s_sb, journal.mark_dirty); if (th->t_trans_id != journal->j_trans_id) { - reiserfs_panic(th->t_super, - "journal-1577: handle trans id %ld != current trans id %ld\n", + reiserfs_panic(th->t_super, "journal-1577", + "handle trans id %ld != current trans id %ld", th->t_trans_id, journal->j_trans_id); } @@ -3295,8 +3294,8 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, ** Nothing can be done here, except make the FS readonly or panic. 
*/ if (journal->j_len >= journal->j_trans_max) { - reiserfs_panic(th->t_super, - "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", + reiserfs_panic(th->t_super, "journal-1413", + "j_len (%lu) is too big", journal->j_len); } @@ -3316,7 +3315,8 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, if (!cn) { cn = get_cnode(p_s_sb); if (!cn) { - reiserfs_panic(p_s_sb, "get_cnode failed!\n"); + reiserfs_panic(p_s_sb, "journal-4", + "get_cnode failed!"); } if (th->t_blocks_logged == th->t_blocks_allocated) { @@ -3584,8 +3584,8 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); if (th->t_trans_id != journal->j_trans_id) { - reiserfs_panic(th->t_super, - "journal-1577: handle trans id %ld != current trans id %ld\n", + reiserfs_panic(th->t_super, "journal-1577", + "handle trans id %ld != current trans id %ld", th->t_trans_id, journal->j_trans_id); } @@ -3664,8 +3664,8 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, } if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) { - reiserfs_panic(p_s_sb, - "journal-003: journal_end: j_start (%ld) is too high\n", + reiserfs_panic(p_s_sb, "journal-003", + "j_start (%ld) is too high", journal->j_start); } return 1; @@ -3710,8 +3710,8 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, /* set the bit for this block in the journal bitmap for this transaction */ jb = journal->j_current_jl->j_list_bitmap; if (!jb) { - reiserfs_panic(p_s_sb, - "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n"); + reiserfs_panic(p_s_sb, "journal-1702", + "journal_list_bitmap is NULL"); } set_bit_in_list_bitmap(p_s_sb, blocknr, jb); @@ -4066,8 +4066,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, if (buffer_journaled(cn->bh)) { jl_cn = get_cnode(p_s_sb); if (!jl_cn) { - reiserfs_panic(p_s_sb, - "journal-1676, get_cnode returned NULL\n"); + reiserfs_panic(p_s_sb, "journal-1676", + "get_cnode returned NULL"); } if (i == 0) { jl->j_realblock = jl_cn; @@ -4083,8 +4083,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, if (is_block_in_log_or_reserved_area (p_s_sb, cn->bh->b_blocknr)) { - reiserfs_panic(p_s_sb, - "journal-2332: Trying to log block %lu, which is a log block\n", + reiserfs_panic(p_s_sb, "journal-2332", + "Trying to log block %lu, " + "which is a log block", cn->bh->b_blocknr); } jl_cn->blocknr = cn->bh->b_blocknr; @@ -4268,8 +4269,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, get_list_bitmap(p_s_sb, journal->j_current_jl); if (!(journal->j_current_jl->j_list_bitmap)) { - reiserfs_panic(p_s_sb, - "journal-1996: do_journal_end, could not get a list bitmap\n"); + reiserfs_panic(p_s_sb, "journal-1996", + "could not get a list bitmap"); } atomic_set(&(journal->j_jlock), 0); diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 381339b432e7..67f1d1de213d 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c @@ -168,10 +168,11 @@ static int leaf_copy_boundary_item(struct buffer_info *dest_bi, if (bytes_or_entries == ih_item_len(ih) && is_indirect_le_ih(ih)) if (get_ih_free_space(ih)) - reiserfs_panic(NULL, - "vs-10020: leaf_copy_boundary_item: " - "last unformatted node must be filled entirely (%h)", - ih); + reiserfs_panic(sb_from_bi(dest_bi), + "vs-10020", + "last unformatted node " + "must be filled " + "entirely (%h)", ih); } #endif @@ -622,9 +623,8 @@ static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb, break; default: - 
reiserfs_panic(NULL, - "vs-10250: leaf_define_dest_src_infos: shift type is unknown (%d)", - shift_mode); + reiserfs_panic(sb_from_bi(src_bi), "vs-10250", + "shift type is unknown (%d)", shift_mode); } RFALSE(!src_bi->bi_bh || !dest_bi->bi_bh, "vs-10260: mode==%d, source (%p) or dest (%p) buffer is initialized incorrectly", @@ -674,9 +674,9 @@ int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes) #ifdef CONFIG_REISERFS_CHECK if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) { print_cur_tb("vs-10275"); - reiserfs_panic(tb->tb_sb, - "vs-10275: leaf_shift_left: balance condition corrupted (%c)", - tb->tb_mode); + reiserfs_panic(tb->tb_sb, "vs-10275", + "balance condition corrupted " + "(%c)", tb->tb_mode); } #endif @@ -889,9 +889,12 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num, #ifdef CONFIG_REISERFS_CHECK if (zeros_number > paste_size) { + struct super_block *sb = NULL; + if (bi && bi->tb) + sb = bi->tb->tb_sb; print_cur_tb("10177"); - reiserfs_panic(NULL, - "vs-10177: leaf_paste_in_buffer: ero number == %d, paste_size == %d", + reiserfs_panic(sb, "vs-10177", + "zeros_number == %d, paste_size == %d", zeros_number, paste_size); } #endif /* CONFIG_REISERFS_CHECK */ diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index bb41c6e7c79b..ef41cc882bd9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -145,10 +145,9 @@ int search_by_entry_key(struct super_block *sb, const struct cpu_key *key, if (!is_direntry_le_ih(de->de_ih) || COMP_SHORT_KEYS(&(de->de_ih->ih_key), key)) { print_block(de->de_bh, 0, -1, -1); - reiserfs_panic(sb, - "vs-7005: search_by_entry_key: found item %h is not directory item or " - "does not belong to the same directory as key %K", - de->de_ih, key); + reiserfs_panic(sb, "vs-7005", "found item %h is not directory " + "item or does not belong to the same directory " + "as key %K", de->de_ih, key); } #endif /* CONFIG_REISERFS_CHECK */ @@ -1193,15 +1192,14 @@ static int entry_points_to_object(const char *name, int len, if (inode) { if (!de_visible(de->de_deh + de->de_entry_num)) - reiserfs_panic(NULL, - "vs-7042: entry_points_to_object: entry must be visible"); + reiserfs_panic(inode->i_sb, "vs-7042", + "entry must be visible"); return (de->de_objectid == inode->i_ino) ? 
1 : 0; } /* this must be added hidden entry */ if (de_visible(de->de_deh + de->de_entry_num)) - reiserfs_panic(NULL, - "vs-7043: entry_points_to_object: entry must be visible"); + reiserfs_panic(NULL, "vs-7043", "entry must be visible"); return 1; } @@ -1315,8 +1313,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len, old_inode, 0); if (retval == -EEXIST) { if (!new_dentry_inode) { - reiserfs_panic(old_dir->i_sb, - "vs-7050: new entry is found, new inode == 0\n"); + reiserfs_panic(old_dir->i_sb, "vs-7050", + "new entry is found, new inode == 0"); } } else if (retval) { int err = journal_end(&th, old_dir->i_sb, jbegin_count); diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c index a3a5f43ff443..90e4e52f857b 100644 --- a/fs/reiserfs/objectid.c +++ b/fs/reiserfs/objectid.c @@ -18,8 +18,7 @@ static void check_objectid_map(struct super_block *s, __le32 * map) { if (le32_to_cpu(map[0]) != 1) - reiserfs_panic(s, - "vs-15010: check_objectid_map: map corrupted: %lx", + reiserfs_panic(s, "vs-15010", "map corrupted: %lx", (long unsigned int)le32_to_cpu(map[0])); // FIXME: add something else here diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index de71372f0dfe..1964acb6eb17 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -353,14 +353,21 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...) extern struct tree_balance *cur_tb; #endif -void reiserfs_panic(struct super_block *sb, const char *fmt, ...) +void __reiserfs_panic(struct super_block *sb, const char *id, + const char *function, const char *fmt, ...) { do_reiserfs_warning(fmt); +#ifdef CONFIG_REISERFS_CHECK dump_stack(); - - panic(KERN_EMERG "REISERFS: panic (device %s): %s\n", - reiserfs_bdevname(sb), error_buf); +#endif + if (sb) + panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n", + sb->s_id, id ? id : "", id ? " " : "", + function, error_buf); + else + panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n", + id ? id : "", id ? " " : "", function, error_buf); } void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...) 
@@ -681,12 +688,10 @@ static void check_leaf_block_head(struct buffer_head *bh) blkh = B_BLK_HEAD(bh); nr = blkh_nr_item(blkh); if (nr > (bh->b_size - BLKH_SIZE) / IH_SIZE) - reiserfs_panic(NULL, - "vs-6010: check_leaf_block_head: invalid item number %z", + reiserfs_panic(NULL, "vs-6010", "invalid item number %z", bh); if (blkh_free_space(blkh) > bh->b_size - BLKH_SIZE - IH_SIZE * nr) - reiserfs_panic(NULL, - "vs-6020: check_leaf_block_head: invalid free space %z", + reiserfs_panic(NULL, "vs-6020", "invalid free space %z", bh); } @@ -697,21 +702,15 @@ static void check_internal_block_head(struct buffer_head *bh) blkh = B_BLK_HEAD(bh); if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT)) - reiserfs_panic(NULL, - "vs-6025: check_internal_block_head: invalid level %z", - bh); + reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh); if (B_NR_ITEMS(bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE) - reiserfs_panic(NULL, - "vs-6030: check_internal_block_head: invalid item number %z", - bh); + reiserfs_panic(NULL, "vs-6030", "invalid item number %z", bh); if (B_FREE_SPACE(bh) != bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS(bh) - DC_SIZE * (B_NR_ITEMS(bh) + 1)) - reiserfs_panic(NULL, - "vs-6040: check_internal_block_head: invalid free space %z", - bh); + reiserfs_panic(NULL, "vs-6040", "invalid free space %z", bh); } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index f328d27a19d5..2de1e309124b 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -366,9 +366,8 @@ inline void decrement_bcount(struct buffer_head *p_s_bh) put_bh(p_s_bh); return; } - reiserfs_panic(NULL, - "PAP-5070: decrement_bcount: trying to free free buffer %b", - p_s_bh); + reiserfs_panic(NULL, "PAP-5070", + "trying to free free buffer %b", p_s_bh); } } @@ -713,8 +712,8 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* #ifdef CONFIG_REISERFS_CHECK if (cur_tb) { print_cur_tb("5140"); - reiserfs_panic(p_s_sb, - "PAP-5140: search_by_key: schedule occurred in do_balance!"); + reiserfs_panic(p_s_sb, "PAP-5140", + "schedule occurred in do_balance!"); } #endif @@ -1511,8 +1510,8 @@ static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th, /* look for the last byte of the tail */ if (search_for_position_by_key(inode->i_sb, &tail_key, path) == POSITION_NOT_FOUND) - reiserfs_panic(inode->i_sb, - "vs-5615: indirect_to_direct_roll_back: found invalid item"); + reiserfs_panic(inode->i_sb, "vs-5615", + "found invalid item"); RFALSE(path->pos_in_item != ih_item_len(PATH_PITEM_HEAD(path)) - 1, "vs-5616: appended bytes found"); @@ -1612,8 +1611,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, print_block(PATH_PLAST_BUFFER(p_s_path), 3, PATH_LAST_POSITION(p_s_path) - 1, PATH_LAST_POSITION(p_s_path) + 1); - reiserfs_panic(p_s_sb, - "PAP-5580: reiserfs_cut_from_item: item to convert does not exist (%K)", + reiserfs_panic(p_s_sb, "PAP-5580", "item to " + "convert does not exist (%K)", p_s_item_key); } continue; @@ -1693,22 +1692,20 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, sure, that we exactly remove last unformatted node pointer of the item */ if (!is_indirect_le_ih(le_ih)) - reiserfs_panic(p_s_sb, - "vs-5652: reiserfs_cut_from_item: " + reiserfs_panic(p_s_sb, "vs-5652", "item must be indirect %h", le_ih); if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE) - reiserfs_panic(p_s_sb, - "vs-5653: reiserfs_cut_from_item: " - "completing indirect2direct conversion indirect item %h " - "being deleted must be 
of 4 byte long", - le_ih); + reiserfs_panic(p_s_sb, "vs-5653", "completing " + "indirect2direct conversion indirect " + "item %h being deleted must be of " + "4 byte long", le_ih); if (c_mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) { - reiserfs_panic(p_s_sb, - "vs-5654: reiserfs_cut_from_item: " - "can not complete indirect2direct conversion of %h (CUT, insert_size==%d)", + reiserfs_panic(p_s_sb, "vs-5654", "can not complete " + "indirect2direct conversion of %h " + "(CUT, insert_size==%d)", le_ih, s_cut_balance.insert_size[0]); } /* it would be useful to make sure, that right neighboring @@ -1923,10 +1920,10 @@ static void check_research_for_paste(struct treepath *path, || op_bytes_number(found_ih, get_last_bh(path)->b_size) != pos_in_item(path)) - reiserfs_panic(NULL, - "PAP-5720: check_research_for_paste: " - "found direct item %h or position (%d) does not match to key %K", - found_ih, pos_in_item(path), p_s_key); + reiserfs_panic(NULL, "PAP-5720", "found direct item " + "%h or position (%d) does not match " + "to key %K", found_ih, + pos_in_item(path), p_s_key); } if (is_indirect_le_ih(found_ih)) { if (le_ih_k_offset(found_ih) + @@ -1935,9 +1932,9 @@ static void check_research_for_paste(struct treepath *path, cpu_key_k_offset(p_s_key) || I_UNFM_NUM(found_ih) != pos_in_item(path) || get_ih_free_space(found_ih) != 0) - reiserfs_panic(NULL, - "PAP-5730: check_research_for_paste: " - "found indirect item (%h) or position (%d) does not match to key (%K)", + reiserfs_panic(NULL, "PAP-5730", "found indirect " + "item (%h) or position (%d) does not " + "match to key (%K)", found_ih, pos_in_item(path), p_s_key); } } diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index 256285dddb20..f8449cb74b53 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -92,8 +92,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, last item of the file */ if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) - reiserfs_panic(sb, - "PAP-14050: direct2indirect: " + reiserfs_panic(sb, "PAP-14050", "direct item (%K) not found", &end_key); p_le_ih = PATH_PITEM_HEAD(path); RFALSE(!is_direct_le_ih(p_le_ih), @@ -214,8 +213,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in /* re-search indirect item */ if (search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND) - reiserfs_panic(p_s_sb, - "PAP-5520: indirect2direct: " + reiserfs_panic(p_s_sb, "PAP-5520", "item to be converted %K does not exist", p_s_item_key); copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); @@ -224,8 +222,8 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in (ih_item_len(&s_ih) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize; if (pos != pos1) - reiserfs_panic(p_s_sb, "vs-5530: indirect2direct: " - "tail position changed while we were reading it"); + reiserfs_panic(p_s_sb, "vs-5530", "tail position " + "changed while we were reading it"); #endif } diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index cf5407ee0f32..04bfd61eeaaa 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -86,11 +86,14 @@ void __reiserfs_warning(struct super_block *s, const char *id, /* assertions handling */ /** always check a condition and panic if it's false. */ -#define __RASSERT( cond, scond, format, args... 
) \ -if( !( cond ) ) \ - reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \ - __FILE__ ":%i:%s: " format "\n", \ - in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args ) +#define __RASSERT(cond, scond, format, args...) \ +do { \ + if (!(cond)) \ + reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \ + __FILE__ ":%i:%s: " format "\n", \ + in_interrupt() ? -1 : task_pid_nr(current), \ + __LINE__, __func__ , ##args); \ +} while (0) #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args) @@ -1448,6 +1451,16 @@ struct buffer_info { int bi_position; }; +static inline struct super_block *sb_from_tb(struct tree_balance *tb) +{ + return tb ? tb->tb_sb : NULL; +} + +static inline struct super_block *sb_from_bi(struct buffer_info *bi) +{ + return bi ? sb_from_tb(bi->tb) : NULL; +} + /* there are 4 types of items: stat data, directory item, indirect, direct. +-------------------+------------+--------------+------------+ | | k_offset | k_uniqueness | mergeable? | @@ -1988,8 +2001,11 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, void unfix_nodes(struct tree_balance *); /* prints.c */ -void reiserfs_panic(struct super_block *s, const char *fmt, ...) +void __reiserfs_panic(struct super_block *s, const char *id, + const char *function, const char *fmt, ...) __attribute__ ((noreturn)); +#define reiserfs_panic(s, id, fmt, args...) \ + __reiserfs_panic(s, id, __func__, fmt, ##args) void reiserfs_info(struct super_block *s, const char *fmt, ...); void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...); void print_indirect_item(struct buffer_head *bh, int item_num); -- cgit v1.2.3-59-g8ed1b From 3cd6dbe6feb9b32347e6c6f25a27f0cde9d50418 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:43 -0400 Subject: reiserfs: cleanup path functions This patch cleans up some redundancies in the reiserfs tree path code. decrement_bcount() is essentially the same function as brelse(), so we use that instead. decrement_counters_in_path() is exactly the same function as pathrelse(), so we kill that and use pathrelse() instead. There's also a bit of cleanup that makes the code a bit more readable. 
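As a concrete picture of the redundancy being removed, the following condensed paraphrase of the two dropped helpers is based on the fs/reiserfs/stree.c hunks below. The *_sketch names are invented for illustration, and the snippet assumes the reiserfs path definitions from include/linux/reiserfs_fs.h rather than adding anything new to the tree.

#include <linux/buffer_head.h>
#include <linux/reiserfs_fs.h>

/* decrement_bcount() reduced to its effect: check for NULL, drop one
 * reference, and complain if the count was already zero. brelse() already
 * performs those checks, so the helper boils down to a plain brelse(bh). */
static inline void decrement_bcount_sketch(struct buffer_head *bh)
{
	brelse(bh);
}

/* decrement_counters_in_path() reduced to its effect: release every buffer
 * on the path and reset path_length -- the same body as pathrelse(). */
static void decrement_counters_in_path_sketch(struct treepath *path)
{
	int offset = path->path_length;

	while (offset > ILLEGAL_PATH_ELEMENT_OFFSET)
		brelse(PATH_OFFSET_PBUFFER(path, offset--));

	path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}

With that equivalence in hand, the patch simply substitutes brelse() and pathrelse() at the former call sites, as the fix_node.c and stree.c hunks that follow show.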
Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/fix_node.c | 58 +++++++++++++++++++++++++------------------------ fs/reiserfs/stree.c | 59 +++++++++++--------------------------------------- 2 files changed, 43 insertions(+), 74 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index bbb37b0589af..aee50c97988d 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -753,20 +753,21 @@ static void free_buffers_in_tb(struct tree_balance *p_s_tb) { int n_counter; - decrement_counters_in_path(p_s_tb->tb_path); + pathrelse(p_s_tb->tb_path); for (n_counter = 0; n_counter < MAX_HEIGHT; n_counter++) { - decrement_bcount(p_s_tb->L[n_counter]); + brelse(p_s_tb->L[n_counter]); + brelse(p_s_tb->R[n_counter]); + brelse(p_s_tb->FL[n_counter]); + brelse(p_s_tb->FR[n_counter]); + brelse(p_s_tb->CFL[n_counter]); + brelse(p_s_tb->CFR[n_counter]); + p_s_tb->L[n_counter] = NULL; - decrement_bcount(p_s_tb->R[n_counter]); p_s_tb->R[n_counter] = NULL; - decrement_bcount(p_s_tb->FL[n_counter]); p_s_tb->FL[n_counter] = NULL; - decrement_bcount(p_s_tb->FR[n_counter]); p_s_tb->FR[n_counter] = NULL; - decrement_bcount(p_s_tb->CFL[n_counter]); p_s_tb->CFL[n_counter] = NULL; - decrement_bcount(p_s_tb->CFR[n_counter]); p_s_tb->CFR[n_counter] = NULL; } } @@ -1022,7 +1023,7 @@ static int get_far_parent(struct tree_balance *p_s_tb, if (buffer_locked(*pp_s_com_father)) { __wait_on_buffer(*pp_s_com_father); if (FILESYSTEM_CHANGED_TB(p_s_tb)) { - decrement_bcount(*pp_s_com_father); + brelse(*pp_s_com_father); return REPEAT_SEARCH; } } @@ -1050,8 +1051,8 @@ static int get_far_parent(struct tree_balance *p_s_tb, return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { - decrement_counters_in_path(&s_path_to_neighbor_father); - decrement_bcount(*pp_s_com_father); + pathrelse(&s_path_to_neighbor_father); + brelse(*pp_s_com_father); return REPEAT_SEARCH; } @@ -1063,7 +1064,7 @@ static int get_far_parent(struct tree_balance *p_s_tb, FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small"); s_path_to_neighbor_father.path_length--; - decrement_counters_in_path(&s_path_to_neighbor_father); + pathrelse(&s_path_to_neighbor_father); return CARRY_ON; } @@ -1086,10 +1087,10 @@ static int get_parents(struct tree_balance *p_s_tb, int n_h) if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) { /* The root can not have parents. Release nodes which previously were obtained as parents of the current node neighbors. */ - decrement_bcount(p_s_tb->FL[n_h]); - decrement_bcount(p_s_tb->CFL[n_h]); - decrement_bcount(p_s_tb->FR[n_h]); - decrement_bcount(p_s_tb->CFR[n_h]); + brelse(p_s_tb->FL[n_h]); + brelse(p_s_tb->CFL[n_h]); + brelse(p_s_tb->FR[n_h]); + brelse(p_s_tb->CFR[n_h]); p_s_tb->FL[n_h] = p_s_tb->CFL[n_h] = p_s_tb->FR[n_h] = p_s_tb->CFR[n_h] = NULL; return CARRY_ON; @@ -1115,9 +1116,9 @@ static int get_parents(struct tree_balance *p_s_tb, int n_h) return n_ret_value; } - decrement_bcount(p_s_tb->FL[n_h]); + brelse(p_s_tb->FL[n_h]); p_s_tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */ - decrement_bcount(p_s_tb->CFL[n_h]); + brelse(p_s_tb->CFL[n_h]); p_s_tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) || @@ -1145,10 +1146,10 @@ static int get_parents(struct tree_balance *p_s_tb, int n_h) p_s_tb->rkey[n_h] = n_position; } - decrement_bcount(p_s_tb->FR[n_h]); + brelse(p_s_tb->FR[n_h]); p_s_tb->FR[n_h] = p_s_curf; /* New initialization of FR[n_path_offset]. 
*/ - decrement_bcount(p_s_tb->CFR[n_h]); + brelse(p_s_tb->CFR[n_h]); p_s_tb->CFR[n_h] = p_s_curcf; /* New initialization of CFR[n_path_offset]. */ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) || @@ -1964,7 +1965,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) if (!p_s_bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { - decrement_bcount(p_s_bh); + brelse(p_s_bh); PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } @@ -1980,7 +1981,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) dc_size(B_N_CHILD(p_s_tb->FL[0], n_child_position)), "PAP-8290: invalid child size of left neighbor"); - decrement_bcount(p_s_tb->L[n_h]); + brelse(p_s_tb->L[n_h]); p_s_tb->L[n_h] = p_s_bh; } @@ -2001,11 +2002,11 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) if (!p_s_bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { - decrement_bcount(p_s_bh); + brelse(p_s_bh); PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } - decrement_bcount(p_s_tb->R[n_h]); + brelse(p_s_tb->R[n_h]); p_s_tb->R[n_h] = p_s_bh; RFALSE(!n_h @@ -2511,16 +2512,17 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ } brelse(p_s_tb->L[i]); - p_s_tb->L[i] = NULL; brelse(p_s_tb->R[i]); - p_s_tb->R[i] = NULL; brelse(p_s_tb->FL[i]); - p_s_tb->FL[i] = NULL; brelse(p_s_tb->FR[i]); - p_s_tb->FR[i] = NULL; brelse(p_s_tb->CFL[i]); - p_s_tb->CFL[i] = NULL; brelse(p_s_tb->CFR[i]); + + p_s_tb->L[i] = NULL; + p_s_tb->R[i] = NULL; + p_s_tb->FL[i] = NULL; + p_s_tb->FR[i] = NULL; + p_s_tb->CFL[i] = NULL; p_s_tb->CFR[i] = NULL; } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index ec837a250a4f..b2eaa0c6b7b7 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -23,7 +23,6 @@ * get_rkey * key_in_buffer * decrement_bcount - * decrement_counters_in_path * reiserfs_check_path * pathrelse_and_restore * pathrelse @@ -359,36 +358,6 @@ static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which sho return 1; } -inline void decrement_bcount(struct buffer_head *p_s_bh) -{ - if (p_s_bh) { - if (atomic_read(&(p_s_bh->b_count))) { - put_bh(p_s_bh); - return; - } - reiserfs_panic(NULL, "PAP-5070", - "trying to free free buffer %b", p_s_bh); - } -} - -/* Decrement b_count field of the all buffers in the path. */ -void decrement_counters_in_path(struct treepath *p_s_search_path) -{ - int n_path_offset = p_s_search_path->path_length; - - RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET || - n_path_offset > EXTENDED_MAX_HEIGHT - 1, - "PAP-5080: invalid path offset of %d", n_path_offset); - - while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) { - struct buffer_head *bh; - - bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--); - decrement_bcount(bh); - } - p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; -} - int reiserfs_check_path(struct treepath *p) { RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET, @@ -396,12 +365,11 @@ int reiserfs_check_path(struct treepath *p) return 0; } -/* Release all buffers in the path. Restore dirty bits clean -** when preparing the buffer for the log -** -** only called from fix_nodes() -*/ -void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path) +/* Drop the reference to each buffer in a path and restore + * dirty bits clean when preparing the buffer for the log. 
+ * This version should only be called from fix_nodes() */ +void pathrelse_and_restore(struct super_block *sb, + struct treepath *p_s_search_path) { int n_path_offset = p_s_search_path->path_length; @@ -409,16 +377,15 @@ void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_pa "clm-4000: invalid path offset"); while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) { - reiserfs_restore_prepared_buffer(s, - PATH_OFFSET_PBUFFER - (p_s_search_path, - n_path_offset)); - brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--)); + struct buffer_head *bh; + bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--); + reiserfs_restore_prepared_buffer(sb, bh); + brelse(bh); } p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } -/* Release all buffers in the path. */ +/* Drop the reference to each buffer in a path */ void pathrelse(struct treepath *p_s_search_path) { int n_path_offset = p_s_search_path->path_length; @@ -631,7 +598,7 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* we must be careful to release all nodes in a path before we either discard the path struct or re-use the path struct, as we do here. */ - decrement_counters_in_path(p_s_search_path); + pathrelse(p_s_search_path); right_neighbor_of_leaf_node = 0; @@ -691,7 +658,7 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* PROC_INFO_INC(p_s_sb, search_by_key_restarted); PROC_INFO_INC(p_s_sb, sbk_restarted[expected_level - 1]); - decrement_counters_in_path(p_s_search_path); + pathrelse(p_s_search_path); /* Get the root block number so that we can repeat the search starting from the root. */ @@ -1868,7 +1835,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *p if (journal_transaction_should_end(th, 0) || reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) { int orig_len_alloc = th->t_blocks_allocated; - decrement_counters_in_path(&s_search_path); + pathrelse(&s_search_path); if (update_timestamps) { p_s_inode->i_mtime = p_s_inode->i_ctime = -- cgit v1.2.3-59-g8ed1b From 0222e6571c332563a48d4cf5487b67feabe60b5e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:44 -0400 Subject: reiserfs: strip trailing whitespace This patch strips trailing whitespace from the reiserfs code. Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/README | 4 +- fs/reiserfs/do_balan.c | 14 ++--- fs/reiserfs/file.c | 8 +-- fs/reiserfs/fix_node.c | 38 ++++++------- fs/reiserfs/hashes.c | 2 +- fs/reiserfs/ibalance.c | 10 ++-- fs/reiserfs/inode.c | 52 +++++++++--------- fs/reiserfs/ioctl.c | 2 +- fs/reiserfs/journal.c | 120 ++++++++++++++++++++--------------------- fs/reiserfs/lbalance.c | 18 +++---- fs/reiserfs/namei.c | 30 +++++------ fs/reiserfs/objectid.c | 2 +- fs/reiserfs/prints.c | 26 ++++----- fs/reiserfs/procfs.c | 2 +- fs/reiserfs/resize.c | 6 +-- fs/reiserfs/stree.c | 8 +-- fs/reiserfs/super.c | 10 ++-- fs/reiserfs/tail_conversion.c | 2 +- include/linux/reiserfs_fs_sb.h | 14 ++--- 19 files changed, 184 insertions(+), 184 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/README b/fs/reiserfs/README index 90e1670e4e6f..14e8c9d460e5 100644 --- a/fs/reiserfs/README +++ b/fs/reiserfs/README @@ -1,4 +1,4 @@ -[LICENSING] +[LICENSING] ReiserFS is hereby licensed under the GNU General Public License version 2. 
@@ -31,7 +31,7 @@ the GPL as not allowing those additional licensing options, you read it wrongly, and Richard Stallman agrees with me, when carefully read you can see that those restrictions on additional terms do not apply to the owner of the copyright, and my interpretation of this shall -govern for this license. +govern for this license. Finally, nothing in this license shall be interpreted to allow you to fail to fairly credit me, or to remove my credits, without my diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 723a7f4011d0..4beb964a2a3e 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb, #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty -/* summary: +/* summary: if deleting something ( tb->insert_size[0] < 0 ) return(balance_leaf_when_delete()); (flag d handled here) else if lnum is larger than 0 we put items into the left node if rnum is larger than 0 we put items into the right node if snum1 is larger than 0 we put items into the new node s1 - if snum2 is larger than 0 we put items into the new node s2 + if snum2 is larger than 0 we put items into the new node s2 Note that all *num* count new items being created. It would be easier to read balance_leaf() if each of these summary lines was a separate procedure rather than being inlined. I think that there are many passages here and in balance_leaf_when_delete() in which two calls to one procedure can replace two passages, and it -might save cache space and improve software maintenance costs to do so. +might save cache space and improve software maintenance costs to do so. Vladimir made the perceptive comment that we should offload most of the decision making in this function into fix_nodes/check_balance, and @@ -288,15 +288,15 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h ) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); - int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0] + int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0] of the affected item */ struct buffer_info bi; struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */ int snum[2]; /* number of items that will be placed into S_new (includes partially shifted items) */ - int sbytes[2]; /* if an item is partially shifted into S_new then - if it is a directory item + int sbytes[2]; /* if an item is partially shifted into S_new then + if it is a directory item it is the number of entries from the item that are shifted into S_new else it is the number of bytes from the item that are shifted into S_new @@ -1983,7 +1983,7 @@ static inline void do_balance_starts(struct tree_balance *tb) /* store_print_tb (tb); */ /* do not delete, just comment it out */ -/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb, +/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb, "check");*/ RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB"); #ifdef CONFIG_REISERFS_CHECK diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 47bab8978be1..f0160ee03e17 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -20,14 +20,14 @@ ** insertion/balancing, for files that are written in one write. 
** It avoids unnecessary tail packings (balances) for files that are written in ** multiple writes and are small enough to have tails. -** +** ** file_release is called by the VFS layer when the file is closed. If ** this is the last open file descriptor, and the file ** small enough to have a tail, and the tail is currently in an ** unformatted node, the tail is converted back into a direct item. -** +** ** We use reiserfs_truncate_file to pack the tail, since it already has -** all the conditions coded. +** all the conditions coded. */ static int reiserfs_file_release(struct inode *inode, struct file *filp) { @@ -223,7 +223,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page, } /* Write @count bytes at position @ppos in a file indicated by @file - from the buffer @buf. + from the buffer @buf. generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index aee50c97988d..a3be7da3e2b9 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -30,8 +30,8 @@ ** get_direct_parent ** get_neighbors ** fix_nodes - ** - ** + ** + ** **/ #include @@ -377,9 +377,9 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, int needed_nodes; int start_item, /* position of item we start filling node from */ end_item, /* position of item we finish filling node by */ - start_bytes, /* number of first bytes (entries for directory) of start_item-th item + start_bytes, /* number of first bytes (entries for directory) of start_item-th item we do not include into node that is being filled */ - end_bytes; /* number of last bytes (entries for directory) of end_item-th item + end_bytes; /* number of last bytes (entries for directory) of end_item-th item we do node include into node that is being filled */ int split_item_positions[2]; /* these are positions in virtual item of items, that are split between S[0] and @@ -569,7 +569,7 @@ extern struct tree_balance *cur_tb; /* Set parameters for balancing. * Performs write of results of analysis of balancing into structure tb, - * where it will later be used by the functions that actually do the balancing. + * where it will later be used by the functions that actually do the balancing. * Parameters: * tb tree_balance structure; * h current level of the node; @@ -1204,7 +1204,7 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree, * h current level of the node; * inum item number in S[h]; * mode i - insert, p - paste; - * Returns: 1 - schedule occurred; + * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. @@ -1239,7 +1239,7 @@ static int ip_check_balance(struct tree_balance *tb, int h) /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters. 
where 4th parameter is s1bytes and 5th - s2bytes */ - short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases + short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases 0,1 - do not shift and do not shift but bottle 2 - shift only whole item to left 3 - shift to left and bottle as much as possible @@ -1288,7 +1288,7 @@ static int ip_check_balance(struct tree_balance *tb, int h) create_virtual_node(tb, h); - /* + /* determine maximal number of items we can shift to the left neighbor (in tb structure) and the maximal number of bytes that can flow to the left neighbor from the left most liquid item that cannot be shifted from S[0] entirely (returned value) @@ -1349,13 +1349,13 @@ static int ip_check_balance(struct tree_balance *tb, int h) { int lpar, rpar, nset, lset, rset, lrset; - /* + /* * regular overflowing of the node */ - /* get_num_ver works in 2 modes (FLOW & NO_FLOW) + /* get_num_ver works in 2 modes (FLOW & NO_FLOW) lpar, rpar - number of items we can shift to left/right neighbor (including splitting item) - nset, lset, rset, lrset - shows, whether flowing items give better packing + nset, lset, rset, lrset - shows, whether flowing items give better packing */ #define FLOW 1 #define NO_FLOW 0 /* do not any splitting */ @@ -1545,7 +1545,7 @@ static int ip_check_balance(struct tree_balance *tb, int h) * h current level of the node; * inum item number in S[h]; * mode i - insert, p - paste; - * Returns: 1 - schedule occurred; + * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. @@ -1728,7 +1728,7 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) * h current level of the node; * inum item number in S[h]; * mode i - insert, p - paste; - * Returns: 1 - schedule occurred; + * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. @@ -1822,7 +1822,7 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) * h current level of the node; * inum item number in S[h]; * mode d - delete, c - cut. - * Returns: 1 - schedule occurred; + * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. @@ -1851,7 +1851,7 @@ static int dc_check_balance(struct tree_balance *tb, int h) * h current level of the node; * inum item number in S[h]; * mode i - insert, p - paste, d - delete, c - cut. - * Returns: 1 - schedule occurred; + * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. @@ -2296,15 +2296,15 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) * analyze what and where should be moved; * get sufficient number of new nodes; * Balancing will start only after all resources will be collected at a time. - * + * * When ported to SMP kernels, only at the last moment after all needed nodes * are collected in cache, will the resources be locked using the usual * textbook ordered lock acquisition algorithms. Note that ensuring that * this code neither write locks what it does not need to write lock nor locks out of order * will be a pain in the butt that could have been avoided. Grumble grumble. 
-Hans - * + * * fix is meant in the sense of render unchanging - * + * * Latency might be improved by first gathering a list of what buffers are needed * and then getting as many of them in parallel as possible? -Hans * @@ -2316,7 +2316,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) * ins_ih & ins_sd are used when inserting * Returns: 1 - schedule occurred while the function worked; * 0 - schedule didn't occur while the function worked; - * -1 - if no_disk_space + * -1 - if no_disk_space */ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c index e664ac16fad9..6471c670743e 100644 --- a/fs/reiserfs/hashes.c +++ b/fs/reiserfs/hashes.c @@ -7,7 +7,7 @@ * (see Applied Cryptography, 2nd edition, p448). * * Jeremy Fitzhardinge 1998 - * + * * Jeremy has agreed to the contents of reiserfs/README. -Hans * Yura's function is added (04/07/2000) */ diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c index 063b5514fe29..2074fd95046b 100644 --- a/fs/reiserfs/ibalance.c +++ b/fs/reiserfs/ibalance.c @@ -278,7 +278,7 @@ static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n) /* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest * last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest - * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest + * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest */ static void internal_copy_pointers_items(struct buffer_info *dest_bi, struct buffer_head *src, @@ -385,7 +385,7 @@ static void internal_move_pointers_items(struct buffer_info *dest_bi, if (last_first == FIRST_TO_LAST) { /* shift_left occurs */ first_pointer = 0; first_item = 0; - /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer, + /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer, for key - with first_item */ internal_delete_pointers_items(src_bi, first_pointer, first_item, cpy_num - del_par); @@ -453,7 +453,7 @@ static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_b } } -/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest. +/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest. * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest. * Replace d_key'th key in buffer cfl. * Delete pointer_amount items and node pointers from buffer src. @@ -518,7 +518,7 @@ static void internal_shift1_left(struct tree_balance *tb, /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */ } -/* Insert d_key'th (delimiting) key from buffer cfr to head of dest. +/* Insert d_key'th (delimiting) key from buffer cfr to head of dest. * Copy n node pointers and n - 1 items from buffer src to buffer dest. * Replace d_key'th key in buffer cfr. * Delete n items and node pointers from buffer src. 
@@ -749,7 +749,7 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure this means that new pointers and items must be inserted AFTER * child_pos } - else + else { it is the position of the leftmost pointer that must be deleted (together with its corresponding key to the left of the pointer) diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index fcd302d81447..d106edaef64f 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -52,7 +52,7 @@ void reiserfs_delete_inode(struct inode *inode) /* Do quota update inside a transaction for journaled quotas. We must do that * after delete_object so that quota updates go into the same transaction as * stat data deletion */ - if (!err) + if (!err) DQUOT_FREE_INODE(inode); if (journal_end(&th, inode->i_sb, jbegin_count)) @@ -363,7 +363,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block, } /* make sure we don't read more bytes than actually exist in ** the file. This can happen in odd cases where i_size isn't - ** correct, and when direct item padding results in a few + ** correct, and when direct item padding results in a few ** extra bytes at the end of the direct item */ if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size) @@ -438,15 +438,15 @@ static int reiserfs_bmap(struct inode *inode, sector_t block, ** -ENOENT instead of a valid buffer. block_prepare_write expects to ** be able to do i/o on the buffers returned, unless an error value ** is also returned. -** +** ** So, this allows block_prepare_write to be used for reading a single block ** in a page. Where it does not produce a valid page for holes, or past the ** end of the file. This turns out to be exactly what we need for reading ** tails for conversion. ** ** The point of the wrapper is forcing a certain value for create, even -** though the VFS layer is calling this function with create==1. If you -** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block, +** though the VFS layer is calling this function with create==1. If you +** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block, ** don't use this function. */ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block, @@ -602,7 +602,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block, int done; int fs_gen; struct reiserfs_transaction_handle *th = NULL; - /* space reserved in transaction batch: + /* space reserved in transaction batch: . 3 balancings in direct->indirect conversion . 1 block involved into reiserfs_update_sd() XXX in practically impossible worst case direct2indirect() @@ -754,7 +754,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block, reiserfs_write_unlock(inode->i_sb); /* the item was found, so new blocks were not added to the file - ** there is no need to make sure the inode is updated with this + ** there is no need to make sure the inode is updated with this ** transaction */ return retval; @@ -986,7 +986,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block, /* this loop could log more blocks than we had originally asked ** for. So, we have to allow the transaction to end if it is - ** too big or too full. Update the inode so things are + ** too big or too full. 
Update the inode so things are ** consistent if we crash before the function returns ** ** release the path so that anybody waiting on the path before @@ -997,7 +997,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block, if (retval) goto failure; } - /* inserting indirect pointers for a hole can take a + /* inserting indirect pointers for a hole can take a ** long time. reschedule if needed */ cond_resched(); @@ -1444,7 +1444,7 @@ void reiserfs_read_locked_inode(struct inode *inode, update sd on unlink all that is required is to check for nlink here. This bug was first found by Sizif when debugging SquidNG/Butterfly, forgotten, and found again after Philippe - Gramoulle reproduced it. + Gramoulle reproduced it. More logical fix would require changes in fs/inode.c:iput() to remove inode from hash-table _after_ fs cleaned disk stuff up and @@ -1619,7 +1619,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync) if (inode->i_sb->s_flags & MS_RDONLY) return -EROFS; /* memory pressure can sometimes initiate write_inode calls with sync == 1, - ** these cases are just when the system needs ram, not when the + ** these cases are just when the system needs ram, not when the ** inode needs to reach disk for safety, and they can safely be ** ignored because the altered inode has already been logged. */ @@ -1736,7 +1736,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i /* inserts the stat data into the tree, and then calls reiserfs_new_directory (to insert ".", ".." item if new object is directory) or reiserfs_new_symlink (to insert symlink body if new - object is symlink) or nothing (if new object is regular file) + object is symlink) or nothing (if new object is regular file) NOTE! uid and gid must already be set in the inode. If we return non-zero due to an error, we have to drop the quota previously allocated @@ -1744,7 +1744,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i if we return non-zero, we also end the transaction. */ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, struct inode *dir, int mode, const char *symname, - /* 0 for regular, EMTRY_DIR_SIZE for dirs, + /* 0 for regular, EMTRY_DIR_SIZE for dirs, strlen (symname) for symlinks) */ loff_t i_size, struct dentry *dentry, struct inode *inode, @@ -1794,7 +1794,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, goto out_bad_inode; } if (old_format_only(sb)) - /* not a perfect generation count, as object ids can be reused, but + /* not a perfect generation count, as object ids can be reused, but ** this is as good as reiserfs can do right now. ** note that the private part of inode isn't filled in yet, we have ** to use the directory. @@ -2081,7 +2081,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) if (p_s_inode->i_size > 0) { if ((error = grab_tail_page(p_s_inode, &page, &bh))) { - // -ENOENT means we truncated past the end of the file, + // -ENOENT means we truncated past the end of the file, // and get_block_create_0 could not find a block to read in, // which is ok. if (error != -ENOENT) @@ -2093,11 +2093,11 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) } } - /* so, if page != NULL, we have a buffer head for the offset at - ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0, - ** then we have an unformatted node. Otherwise, we have a direct item, - ** and no zeroing is required on disk. 
We zero after the truncate, - ** because the truncate might pack the item anyway + /* so, if page != NULL, we have a buffer head for the offset at + ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0, + ** then we have an unformatted node. Otherwise, we have a direct item, + ** and no zeroing is required on disk. We zero after the truncate, + ** because the truncate might pack the item anyway ** (it will unmap bh if it packs). */ /* it is enough to reserve space in transaction for 2 balancings: @@ -2306,8 +2306,8 @@ static int map_block_for_writepage(struct inode *inode, return retval; } -/* - * mason@suse.com: updated in 2.5.54 to follow the same general io +/* + * mason@suse.com: updated in 2.5.54 to follow the same general io * start/recovery path as __block_write_full_page, along with special * code to handle reiserfs tails. */ @@ -2447,7 +2447,7 @@ static int reiserfs_write_full_page(struct page *page, unlock_page(page); /* - * since any buffer might be the only dirty buffer on the page, + * since any buffer might be the only dirty buffer on the page, * the first submit_bh can bring the page out of writeback. * be careful with the buffers. */ @@ -2466,8 +2466,8 @@ static int reiserfs_write_full_page(struct page *page, if (nr == 0) { /* * if this page only had a direct item, it is very possible for - * no io to be required without there being an error. Or, - * someone else could have locked them and sent them down the + * no io to be required without there being an error. Or, + * someone else could have locked them and sent them down the * pipe without locking the page */ bh = head; @@ -2486,7 +2486,7 @@ static int reiserfs_write_full_page(struct page *page, fail: /* catches various errors, we need to make sure any valid dirty blocks - * get to the media. The page is currently locked and not marked for + * get to the media. The page is currently locked and not marked for * writeback */ ClearPageUptodate(page); diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 830332021ed4..0ccc3fdda7bf 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -189,7 +189,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) } /* we unpack by finding the page with the tail, and calling - ** reiserfs_prepare_write on that page. This will force a + ** reiserfs_prepare_write on that page. This will force a ** reiserfs_get_block to unpack the tail for us. */ index = inode->i_size >> PAGE_CACHE_SHIFT; diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index db91754cfb83..4f787462becc 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -1,36 +1,36 @@ /* ** Write ahead logging implementation copyright Chris Mason 2000 ** -** The background commits make this code very interelated, and +** The background commits make this code very interelated, and ** overly complex. I need to rethink things a bit....The major players: ** -** journal_begin -- call with the number of blocks you expect to log. +** journal_begin -- call with the number of blocks you expect to log. ** If the current transaction is too -** old, it will block until the current transaction is +** old, it will block until the current transaction is ** finished, and then start a new one. -** Usually, your transaction will get joined in with +** Usually, your transaction will get joined in with ** previous ones for speed. 
** -** journal_join -- same as journal_begin, but won't block on the current +** journal_join -- same as journal_begin, but won't block on the current ** transaction regardless of age. Don't ever call -** this. Ever. There are only two places it should be +** this. Ever. There are only two places it should be ** called from, and they are both inside this file. ** -** journal_mark_dirty -- adds blocks into this transaction. clears any flags +** journal_mark_dirty -- adds blocks into this transaction. clears any flags ** that might make them get sent to disk -** and then marks them BH_JDirty. Puts the buffer head -** into the current transaction hash. +** and then marks them BH_JDirty. Puts the buffer head +** into the current transaction hash. ** ** journal_end -- if the current transaction is batchable, it does nothing ** otherwise, it could do an async/synchronous commit, or -** a full flush of all log and real blocks in the +** a full flush of all log and real blocks in the ** transaction. ** -** flush_old_commits -- if the current transaction is too old, it is ended and -** commit blocks are sent to disk. Forces commit blocks -** to disk for all backgrounded commits that have been +** flush_old_commits -- if the current transaction is too old, it is ended and +** commit blocks are sent to disk. Forces commit blocks +** to disk for all backgrounded commits that have been ** around too long. -** -- Note, if you call this as an immediate flush from +** -- Note, if you call this as an immediate flush from ** from within kupdate, it will ignore the immediate flag */ @@ -212,7 +212,7 @@ static void allocate_bitmap_nodes(struct super_block *p_s_sb) list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } else { - break; // this is ok, we'll try again when more are needed + break; /* this is ok, we'll try again when more are needed */ } } } @@ -283,7 +283,7 @@ static int free_bitmap_nodes(struct super_block *p_s_sb) } /* -** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. +** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. ** jb_array is the array to be filled in. */ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb, @@ -315,7 +315,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb, } /* -** find an available list bitmap. If you can't find one, flush a commit list +** find an available list bitmap. If you can't find one, flush a commit list ** and try again */ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb, @@ -348,7 +348,7 @@ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb, return jb; } -/* +/* ** allocates a new chunk of X nodes, and links them all together as a list. ** Uses the cnode->next and cnode->prev pointers ** returns NULL on failure @@ -376,7 +376,7 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) } /* -** pulls a cnode off the free list, or returns NULL on failure +** pulls a cnode off the free list, or returns NULL on failure */ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) { @@ -403,7 +403,7 @@ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) } /* -** returns a cnode to the free list +** returns a cnode to the free list */ static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) @@ -1192,8 +1192,8 @@ static int flush_commit_list(struct super_block *s, } /* -** flush_journal_list frequently needs to find a newer transaction for a given block. 
This does that, or -** returns NULL if it can't find anything +** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or +** returns NULL if it can't find anything */ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode @@ -1335,8 +1335,8 @@ static int update_journal_header_block(struct super_block *p_s_sb, return _update_journal_header_block(p_s_sb, offset, trans_id); } -/* -** flush any and all journal lists older than you are +/* +** flush any and all journal lists older than you are ** can only be called from flush_journal_list */ static int flush_older_journal_lists(struct super_block *p_s_sb, @@ -1382,8 +1382,8 @@ static void del_from_work_list(struct super_block *s, ** always set flushall to 1, unless you are calling from inside ** flush_journal_list ** -** IMPORTANT. This can only be called while there are no journal writers, -** and the journal is locked. That means it can only be called from +** IMPORTANT. This can only be called while there are no journal writers, +** and the journal is locked. That means it can only be called from ** do_journal_end, or by journal_release */ static int flush_journal_list(struct super_block *s, @@ -1429,7 +1429,7 @@ static int flush_journal_list(struct super_block *s, goto flush_older_and_return; } - /* start by putting the commit list on disk. This will also flush + /* start by putting the commit list on disk. This will also flush ** the commit lists of any olders transactions */ flush_commit_list(s, jl, 1); @@ -1444,8 +1444,8 @@ static int flush_journal_list(struct super_block *s, goto flush_older_and_return; } - /* loop through each cnode, see if we need to write it, - ** or wait on a more recent transaction, or just ignore it + /* loop through each cnode, see if we need to write it, + ** or wait on a more recent transaction, or just ignore it */ if (atomic_read(&(journal->j_wcount)) != 0) { reiserfs_panic(s, "journal-844", "journal list is flushing, " @@ -1473,8 +1473,8 @@ static int flush_journal_list(struct super_block *s, if (!pjl && cn->bh) { saved_bh = cn->bh; - /* we do this to make sure nobody releases the buffer while - ** we are working with it + /* we do this to make sure nobody releases the buffer while + ** we are working with it */ get_bh(saved_bh); @@ -1497,8 +1497,8 @@ static int flush_journal_list(struct super_block *s, goto free_cnode; } - /* bh == NULL when the block got to disk on its own, OR, - ** the block got freed in a future transaction + /* bh == NULL when the block got to disk on its own, OR, + ** the block got freed in a future transaction */ if (saved_bh == NULL) { goto free_cnode; @@ -1586,7 +1586,7 @@ static int flush_journal_list(struct super_block *s, __func__); flush_older_and_return: - /* before we can update the journal header block, we _must_ flush all + /* before we can update the journal header block, we _must_ flush all ** real blocks from all older transactions to disk. 
This is because ** once the header block is updated, this transaction will not be ** replayed after a crash @@ -1596,7 +1596,7 @@ static int flush_journal_list(struct super_block *s, } err = journal->j_errno; - /* before we can remove everything from the hash tables for this + /* before we can remove everything from the hash tables for this ** transaction, we must make sure it can never be replayed ** ** since we are only called from do_journal_end, we know for sure there @@ -2016,9 +2016,9 @@ static int journal_compare_desc_commit(struct super_block *p_s_sb, return 0; } -/* returns 0 if it did not find a description block +/* returns 0 if it did not find a description block ** returns -1 if it found a corrupt commit block -** returns 1 if both desc and commit were valid +** returns 1 if both desc and commit were valid */ static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, @@ -2380,8 +2380,8 @@ static int journal_read(struct super_block *p_s_sb) bdevname(journal->j_dev_bd, b)); start = get_seconds(); - /* step 1, read in the journal header block. Check the transaction it says - ** is the first unflushed, and if that transaction is not valid, + /* step 1, read in the journal header block. Check the transaction it says + ** is the first unflushed, and if that transaction is not valid, ** replay is done */ journal->j_header_bh = journal_bread(p_s_sb, @@ -2406,8 +2406,8 @@ static int journal_read(struct super_block *p_s_sb) le32_to_cpu(jh->j_last_flush_trans_id)); valid_journal_header = 1; - /* now, we try to read the first unflushed offset. If it is not valid, - ** there is nothing more we can do, and it makes no sense to read + /* now, we try to read the first unflushed offset. If it is not valid, + ** there is nothing more we can do, and it makes no sense to read ** through the whole log. */ d_bh = @@ -2919,7 +2919,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th, return 0; } -/* this must be called inside a transaction, and requires the +/* this must be called inside a transaction, and requires the ** kernel_lock to be held */ void reiserfs_block_writes(struct reiserfs_transaction_handle *th) @@ -3040,7 +3040,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, now = get_seconds(); /* if there is no room in the journal OR - ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning + ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning ** we don't sleep if there aren't other writers */ @@ -3240,7 +3240,7 @@ int journal_begin(struct reiserfs_transaction_handle *th, ** ** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the ** transaction is committed. -** +** ** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len. */ int journal_mark_dirty(struct reiserfs_transaction_handle *th, @@ -3290,7 +3290,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, atomic_read(&(journal->j_wcount))); return 1; } - /* this error means I've screwed up, and we've overflowed the transaction. + /* this error means I've screwed up, and we've overflowed the transaction. ** Nothing can be done here, except make the FS readonly or panic. */ if (journal->j_len >= journal->j_trans_max) { @@ -3380,7 +3380,7 @@ int journal_end(struct reiserfs_transaction_handle *th, } } -/* removes from the current transaction, relsing and descrementing any counters. 
+/* removes from the current transaction, relsing and descrementing any counters. ** also files the removed buffer directly onto the clean list ** ** called by journal_mark_freed when a block has been deleted @@ -3478,7 +3478,7 @@ static int can_dirty(struct reiserfs_journal_cnode *cn) } /* syncs the commit blocks, but does not force the real buffers to disk -** will wait until the current transaction is done/committed before returning +** will wait until the current transaction is done/committed before returning */ int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) @@ -3560,13 +3560,13 @@ int reiserfs_flush_old_commits(struct super_block *p_s_sb) /* ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit -** -** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all +** +** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all ** the writers are done. By the time it wakes up, the transaction it was called has already ended, so it just ** flushes the commit list and returns 0. ** ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait. -** +** ** Note, we can't allow the journal_end to proceed while there are still writers in the log. */ static int check_journal_end(struct reiserfs_transaction_handle *th, @@ -3594,7 +3594,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, atomic_dec(&(journal->j_wcount)); } - /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released + /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released ** will be dealt with by next transaction that actually writes something, but should be taken ** care of in this trans */ @@ -3603,7 +3603,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, /* if wcount > 0, and we are called to with flush or commit_now, ** we wait on j_join_wait. We will wake up when the last writer has ** finished the transaction, and started it on its way to the disk. - ** Then, we flush the commit or journal list, and just return 0 + ** Then, we flush the commit or journal list, and just return 0 ** because the rest of journal end was already done for this transaction. */ if (atomic_read(&(journal->j_wcount)) > 0) { @@ -3674,7 +3674,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, /* ** Does all the work that makes deleting blocks safe. ** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on. -** +** ** otherwise: ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes ** before this transaction has finished. @@ -3878,7 +3878,7 @@ extern struct tree_balance *cur_tb; ** be written to disk while we are altering it. So, we must: ** clean it ** wait on it. -** +** */ int reiserfs_prepare_for_journal(struct super_block *p_s_sb, struct buffer_head *bh, int wait) @@ -3920,7 +3920,7 @@ static void flush_old_journal_lists(struct super_block *s) } } -/* +/* ** long and ugly. If flush, will not return until all commit ** blocks and all real buffers in the trans are on disk. ** If no_async, won't return until all commit blocks are on disk. 
@@ -3981,7 +3981,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, wait_on_commit = 1; } - /* check_journal_end locks the journal, and unlocks if it does not return 1 + /* check_journal_end locks the journal, and unlocks if it does not return 1 ** it tells us if we should continue with the journal_end, or just return */ if (!check_journal_end(th, p_s_sb, nblocks, flags)) { @@ -4078,7 +4078,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, last_cn->next = jl_cn; } last_cn = jl_cn; - /* make sure the block we are trying to log is not a block + /* make sure the block we are trying to log is not a block of journal or reserved area */ if (is_block_in_log_or_reserved_area @@ -4225,9 +4225,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, } else if (!(jl->j_state & LIST_COMMIT_PENDING)) queue_delayed_work(commit_wq, &journal->j_work, HZ / 10); - /* if the next transaction has any chance of wrapping, flush - ** transactions that might get overwritten. If any journal lists are very - ** old flush them as well. + /* if the next transaction has any chance of wrapping, flush + ** transactions that might get overwritten. If any journal lists are very + ** old flush them as well. */ first_jl: list_for_each_safe(entry, safe, &journal->j_journal_list) { diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 21a171ceba1d..381750a155f6 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c @@ -119,8 +119,8 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi, DEH_SIZE * copy_count + copy_records_len); } -/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or - part of it or nothing (see the return 0 below) from SOURCE to the end +/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or + part of it or nothing (see the return 0 below) from SOURCE to the end (if last_first) or beginning (!last_first) of the DEST */ /* returns 1 if anything was copied, else 0 */ static int leaf_copy_boundary_item(struct buffer_info *dest_bi, @@ -396,7 +396,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi, else { struct item_head n_ih; - /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST + /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST part defined by 'cpy_bytes'; create new item header; change old item_header (????); n_ih = new item_header; */ @@ -426,7 +426,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi, else { struct item_head n_ih; - /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST + /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST part defined by 'cpy_bytes'; create new item header; n_ih = new item_header; */ @@ -724,7 +724,7 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes) static void leaf_delete_items_entirely(struct buffer_info *bi, int first, int del_num); /* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR. - If not. + If not. If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of the first item. Part defined by del_bytes. Don't delete first item header If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. 
Delete part of body of @@ -783,7 +783,7 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first, /* len = body len of item */ len = ih_item_len(ih); - /* delete the part of the last item of the bh + /* delete the part of the last item of the bh do not delete item header */ leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1, @@ -865,7 +865,7 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before, } } -/* paste paste_size bytes to affected_item_num-th item. +/* paste paste_size bytes to affected_item_num-th item. When item is a directory, this only prepare space for new entries */ void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num, int pos_in_item, int paste_size, @@ -1022,7 +1022,7 @@ static int leaf_cut_entries(struct buffer_head *bh, /* when cut item is part of regular file pos_in_item - first byte that must be cut cut_size - number of bytes to be cut beginning from pos_in_item - + when cut item is part of directory pos_in_item - number of first deleted entry cut_size - count of deleted entries @@ -1275,7 +1275,7 @@ void leaf_paste_entries(struct buffer_info *bi, /* change item key if necessary (when we paste before 0-th entry */ if (!before) { set_le_ih_k_offset(ih, deh_offset(new_dehs)); -/* memcpy (&ih->ih_key.k_offset, +/* memcpy (&ih->ih_key.k_offset, &new_dehs->deh_offset, SHORT_KEY_SIZE);*/ } #ifdef CONFIG_REISERFS_CHECK diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index cb1a9e977907..9d1070e741fc 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -106,7 +106,7 @@ key of the first directory entry in it. This function first calls search_by_key, then, if item whose first entry matches is not found it looks for the entry inside directory item found by search_by_key. Fills the path to the entry, and to the -entry position in the item +entry position in the item */ @@ -371,7 +371,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); } -/* +/* ** looks up the dentry of the parent directory for child. ** taken from ext2_get_parent */ @@ -401,7 +401,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child) return d_obtain_alias(inode); } -/* add entry to the directory (entry can be hidden). +/* add entry to the directory (entry can be hidden). insert definition of when hidden directories are used here -Hans @@ -559,7 +559,7 @@ static int drop_new_inode(struct inode *inode) return 0; } -/* utility function that does setup for reiserfs_new_inode. +/* utility function that does setup for reiserfs_new_inode. ** DQUOT_INIT needs lots of credits so it's better to have it ** outside of a transaction, so we had to pull some bits of ** reiserfs_new_inode out into this func. @@ -820,7 +820,7 @@ static inline int reiserfs_empty_dir(struct inode *inode) { /* we can cheat because an old format dir cannot have ** EMPTY_DIR_SIZE, and a new format dir cannot have - ** EMPTY_DIR_SIZE_V1. So, if the inode is either size, + ** EMPTY_DIR_SIZE_V1. So, if the inode is either size, ** regardless of disk format version, the directory is empty. 
*/ if (inode->i_size != EMPTY_DIR_SIZE && @@ -1162,7 +1162,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir, return retval; } -// de contains information pointing to an entry which +/* de contains information pointing to an entry which */ static int de_still_valid(const char *name, int len, struct reiserfs_dir_entry *de) { @@ -1206,10 +1206,10 @@ static void set_ino_in_dir_entry(struct reiserfs_dir_entry *de, de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid; } -/* +/* * process, that is going to call fix_nodes/do_balance must hold only * one path. If it holds 2 or more, it can get into endless waiting in - * get_empty_nodes or its clones + * get_empty_nodes or its clones */ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) @@ -1263,7 +1263,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode_mode = old_inode->i_mode; if (S_ISDIR(old_inode_mode)) { - // make sure, that directory being renamed has correct ".." + // make sure, that directory being renamed has correct ".." // and that its new parent directory has not too many links // already @@ -1274,8 +1274,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, } } - /* directory is renamed, its parent directory will be changed, - ** so find ".." entry + /* directory is renamed, its parent directory will be changed, + ** so find ".." entry */ dot_dot_de.de_gen_number_bit_string = NULL; retval = @@ -1385,9 +1385,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, this stuff, yes? Then, having gathered everything into RAM we should lock the buffers, yes? -Hans */ - /* probably. our rename needs to hold more - ** than one path at once. The seals would - ** have to be written to deal with multi-path + /* probably. our rename needs to hold more + ** than one path at once. The seals would + ** have to be written to deal with multi-path ** issues -chris */ /* sanity checking before doing the rename - avoid races many @@ -1465,7 +1465,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, } if (S_ISDIR(old_inode_mode)) { - // adjust ".." of renamed directory + /* adjust ".." of renamed directory */ set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir)); journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh); diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c index d2d6b5650188..3a6de810bd61 100644 --- a/fs/reiserfs/objectid.c +++ b/fs/reiserfs/objectid.c @@ -180,7 +180,7 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s) if (cur_size > new_size) { /* mark everyone used that was listed as free at the end of the objectid - ** map + ** map */ objectid_map[new_size - 1] = objectid_map[cur_size - 1]; set_sb_oid_cursize(disk_sb, new_size); diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 8e826c07cd21..536eacaeb710 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -178,11 +178,11 @@ static char *is_there_reiserfs_struct(char *fmt, int *what) appropriative printk. With this reiserfs_warning you can use format specification for complex structures like you used to do with printfs for integers, doubles and pointers. 
For instance, to print - out key structure you have to write just: - reiserfs_warning ("bad key %k", key); - instead of - printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid, - key->k_offset, key->k_uniqueness); + out key structure you have to write just: + reiserfs_warning ("bad key %k", key); + instead of + printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid, + key->k_offset, key->k_uniqueness); */ static DEFINE_SPINLOCK(error_lock); static void prepare_error_buf(const char *fmt, va_list args) @@ -244,11 +244,11 @@ static void prepare_error_buf(const char *fmt, va_list args) } /* in addition to usual conversion specifiers this accepts reiserfs - specific conversion specifiers: - %k to print little endian key, - %K to print cpu key, + specific conversion specifiers: + %k to print little endian key, + %K to print cpu key, %h to print item_head, - %t to print directory entry + %t to print directory entry %z to print block head (arg must be struct buffer_head * %b to print buffer_head */ @@ -314,17 +314,17 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...) maintainer-errorid. Don't bother with reusing errorids, there are lots of numbers out there. - Example: - + Example: + reiserfs_panic( p_sb, "reiser-29: reiserfs_new_blocknrs: " "one of search_start or rn(%d) is equal to MAX_B_NUM," - "which means that we are optimizing location based on the bogus location of a temp buffer (%p).", + "which means that we are optimizing location based on the bogus location of a temp buffer (%p).", rn, bh ); Regular panic()s sometimes clear the screen before the message can - be read, thus the need for the while loop. + be read, thus the need for the while loop. Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it pointless complexity): diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index d4d7f1433ed0..d5066400638a 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -633,7 +633,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, * */ -/* +/* * Make Linus happy. * Local variables: * c-indentation-style: "K&R" diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c index f71c3948edef..238e9d9b31e0 100644 --- a/fs/reiserfs/resize.c +++ b/fs/reiserfs/resize.c @@ -1,8 +1,8 @@ -/* +/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ -/* +/* * Written by Alexander Zarochentcev. * * The kernel part of the (on-line) reiserfs resizer. @@ -101,7 +101,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new) memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size); /* just in case vfree schedules on us, copy the new - ** pointer into the journal struct before freeing the + ** pointer into the journal struct before freeing the ** old one */ node_tmp = jb->bitmaps; diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index b2eaa0c6b7b7..a65bfee28bb8 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -77,7 +77,7 @@ inline void copy_item_head(struct item_head *p_v_to, /* k1 is pointer to on-disk structure which is stored in little-endian form. k2 is pointer to cpu variable. For key of items of the same object this returns 0. 
- Returns: -1 if key1 < key2 + Returns: -1 if key1 < key2 0 if key1 == key2 1 if key1 > key2 */ inline int comp_short_keys(const struct reiserfs_key *le_key, @@ -890,7 +890,7 @@ static inline int prepare_for_direct_item(struct treepath *path, } // new file gets truncated if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) { - // + // round_len = ROUND_UP(new_file_length); /* this was n_new_file_length < le_ih ... */ if (round_len < le_ih_k_offset(le_ih)) { @@ -1443,7 +1443,7 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, if (atomic_read(&p_s_inode->i_count) > 1 || !tail_has_to_be_packed(p_s_inode) || !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) { - // leave tail in an unformatted node + /* leave tail in an unformatted node */ *p_c_mode = M_SKIP_BALANCING; cut_bytes = n_block_size - (n_new_file_size & (n_block_size - 1)); @@ -1826,7 +1826,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *p /* While there are bytes to truncate and previous file item is presented in the tree. */ /* - ** This loop could take a really long time, and could log + ** This loop could take a really long time, and could log ** many more blocks than a transaction can hold. So, we do a polite ** journal end here, and if the transaction needs ending, we make ** sure the file is consistent before ending the current trans diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 4a1e16362ebd..d7519b951500 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -758,7 +758,7 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts, char **opt_arg, unsigned long *bit_flags) { char *p; - /* foo=bar, + /* foo=bar, ^ ^ ^ | | +-- option_end | +-- arg_start @@ -1348,7 +1348,7 @@ static int read_super_block(struct super_block *s, int offset) } // // ok, reiserfs signature (old or new) found in at the given offset - // + // fs_blocksize = sb_blocksize(rs); brelse(bh); sb_set_blocksize(s, fs_blocksize); @@ -1534,8 +1534,8 @@ static int what_hash(struct super_block *s) code = find_hash_out(s); if (code != UNSET_HASH && reiserfs_hash_detect(s)) { - /* detection has found the hash, and we must check against the - ** mount options + /* detection has found the hash, and we must check against the + ** mount options */ if (reiserfs_rupasov_hash(s) && code != YURA_HASH) { reiserfs_warning(s, "reiserfs-2507", @@ -1567,7 +1567,7 @@ static int what_hash(struct super_block *s) } } - /* if we are mounted RW, and we have a new valid hash code, update + /* if we are mounted RW, and we have a new valid hash code, update ** the super */ if (code != UNSET_HASH && diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index 083f74435f65..0635cfe0f0b7 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -46,7 +46,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, /* Set the key to search for the place for new unfm pointer */ make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4); - // FIXME: we could avoid this + /* FIXME: we could avoid this */ if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) { reiserfs_error(sb, "PAP-14030", "pasted or inserted byte exists in " diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 4686b90886ed..5621d87c4479 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -14,7 +14,7 @@ typedef enum { } reiserfs_super_block_flags; /* struct 
reiserfs_super_block accessors/mutators - * since this is a disk structure, it will always be in + * since this is a disk structure, it will always be in * little endian format. */ #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count)) #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v)) @@ -83,16 +83,16 @@ typedef enum { /* LOGGING -- */ -/* These all interelate for performance. +/* These all interelate for performance. ** -** If the journal block count is smaller than n transactions, you lose speed. +** If the journal block count is smaller than n transactions, you lose speed. ** I don't know what n is yet, I'm guessing 8-16. ** ** typical transaction size depends on the application, how often fsync is -** called, and how many metadata blocks you dirty in a 30 second period. +** called, and how many metadata blocks you dirty in a 30 second period. ** The more small files (<16k) you use, the larger your transactions will ** be. -** +** ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough ** to prevent wrapping before dirty meta blocks get to disk. @@ -242,7 +242,7 @@ struct reiserfs_journal { struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */ struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */ - struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all + struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all the transactions */ struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */ int j_persistent_trans; @@ -426,7 +426,7 @@ enum reiserfs_mount_options { partition will be dealt with in a manner of 3.5.x */ -/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting +/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option ** is not required. If the normal autodection code can't determine which ** hash to use (because both hashes had the same value for a file) -- cgit v1.2.3-59-g8ed1b From a9dd364358fbdc68faee5d20c2d648c320dc3cf0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:45 -0400 Subject: reiserfs: rename p_s_sb to sb This patch is a simple s/p_s_sb/sb/g to the reiserfs code. This is the first in a series of patches to rip out some of the awful variable naming in reiserfs. 
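For illustration only, a rename of this form could be reproduced with something like the following sketch (assuming GNU sed; the file list is taken from the diffstat below, and this is not necessarily how the patch was actually generated):

    sed -i 's/\bp_s_sb\b/sb/g' fs/reiserfs/fix_node.c fs/reiserfs/journal.c \
        fs/reiserfs/stree.c fs/reiserfs/tail_conversion.c include/linux/reiserfs_fs.h

The \b word-boundary anchors guard against accidental substring matches, so other p_s_-prefixed names (p_s_tb, p_s_bh, and so on, which remain unchanged in the diff below) are left alone; any comment or continuation-line alignment disturbed by the shorter name would still need to be fixed up by hand.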
Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/fix_node.c | 46 +-- fs/reiserfs/journal.c | 735 +++++++++++++++++++++--------------------- fs/reiserfs/stree.c | 126 ++++---- fs/reiserfs/tail_conversion.c | 16 +- include/linux/reiserfs_fs.h | 14 +- 5 files changed, 468 insertions(+), 469 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index a3be7da3e2b9..799c0ce24291 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -785,7 +785,7 @@ static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h) b_blocknr_t *p_n_blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */ n_retval = CARRY_ON; - struct super_block *p_s_sb = p_s_tb->tb_sb; + struct super_block *sb = p_s_tb->tb_sb; /* number_of_freeblk is the number of empty blocks which have been acquired for use by the balancing algorithm minus the number of @@ -830,7 +830,7 @@ static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h) RFALSE(!*p_n_blocknr, "PAP-8135: reiserfs_new_blocknrs failed when got new blocks"); - p_s_new_bh = sb_getblk(p_s_sb, *p_n_blocknr); + p_s_new_bh = sb_getblk(sb, *p_n_blocknr); RFALSE(buffer_dirty(p_s_new_bh) || buffer_journaled(p_s_new_bh) || buffer_journal_dirty(p_s_new_bh), @@ -899,7 +899,7 @@ static int get_rfree(struct tree_balance *tb, int h) static int is_left_neighbor_in_cache(struct tree_balance *p_s_tb, int n_h) { struct buffer_head *p_s_father, *left; - struct super_block *p_s_sb = p_s_tb->tb_sb; + struct super_block *sb = p_s_tb->tb_sb; b_blocknr_t n_left_neighbor_blocknr; int n_left_neighbor_position; @@ -924,7 +924,7 @@ static int is_left_neighbor_in_cache(struct tree_balance *p_s_tb, int n_h) n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position); /* Look for the left neighbor in the cache. */ - if ((left = sb_find_get_block(p_s_sb, n_left_neighbor_blocknr))) { + if ((left = sb_find_get_block(sb, n_left_neighbor_blocknr))) { RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left), "vs-8170: left neighbor (%b %z) is not in the tree", @@ -1942,14 +1942,14 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) int n_child_position, n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1); unsigned long n_son_number; - struct super_block *p_s_sb = p_s_tb->tb_sb; + struct super_block *sb = p_s_tb->tb_sb; struct buffer_head *p_s_bh; - PROC_INFO_INC(p_s_sb, get_neighbors[n_h]); + PROC_INFO_INC(sb, get_neighbors[n_h]); if (p_s_tb->lnum[n_h]) { /* We need left neighbor to balance S[n_h]. */ - PROC_INFO_INC(p_s_sb, need_l_neighbor[n_h]); + PROC_INFO_INC(sb, need_l_neighbor[n_h]); p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); RFALSE(p_s_bh == p_s_tb->FL[n_h] && @@ -1961,12 +1961,12 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) p_s_tb->FL[n_h]) ? p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb-> FL[n_h]); n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position); - p_s_bh = sb_bread(p_s_sb, n_son_number); + p_s_bh = sb_bread(sb, n_son_number); if (!p_s_bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { brelse(p_s_bh); - PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]); + PROC_INFO_INC(sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } @@ -1986,7 +1986,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) } if (p_s_tb->rnum[n_h]) { /* We need right neighbor to balance S[n_path_offset]. 
*/ - PROC_INFO_INC(p_s_sb, need_r_neighbor[n_h]); + PROC_INFO_INC(sb, need_r_neighbor[n_h]); p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); RFALSE(p_s_bh == p_s_tb->FR[n_h] && @@ -1998,12 +1998,12 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) n_child_position = (p_s_bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0; n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position); - p_s_bh = sb_bread(p_s_sb, n_son_number); + p_s_bh = sb_bread(sb, n_son_number); if (!p_s_bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { brelse(p_s_bh); - PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]); + PROC_INFO_INC(sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } brelse(p_s_tb->R[n_h]); @@ -2089,51 +2089,51 @@ static int get_mem_for_virtual_node(struct tree_balance *tb) } #ifdef CONFIG_REISERFS_CHECK -static void tb_buffer_sanity_check(struct super_block *p_s_sb, +static void tb_buffer_sanity_check(struct super_block *sb, struct buffer_head *p_s_bh, const char *descr, int level) { if (p_s_bh) { if (atomic_read(&(p_s_bh->b_count)) <= 0) { - reiserfs_panic(p_s_sb, "jmacd-1", "negative or zero " + reiserfs_panic(sb, "jmacd-1", "negative or zero " "reference counter for buffer %s[%d] " "(%b)", descr, level, p_s_bh); } if (!buffer_uptodate(p_s_bh)) { - reiserfs_panic(p_s_sb, "jmacd-2", "buffer is not up " + reiserfs_panic(sb, "jmacd-2", "buffer is not up " "to date %s[%d] (%b)", descr, level, p_s_bh); } if (!B_IS_IN_TREE(p_s_bh)) { - reiserfs_panic(p_s_sb, "jmacd-3", "buffer is not " + reiserfs_panic(sb, "jmacd-3", "buffer is not " "in tree %s[%d] (%b)", descr, level, p_s_bh); } - if (p_s_bh->b_bdev != p_s_sb->s_bdev) { - reiserfs_panic(p_s_sb, "jmacd-4", "buffer has wrong " + if (p_s_bh->b_bdev != sb->s_bdev) { + reiserfs_panic(sb, "jmacd-4", "buffer has wrong " "device %s[%d] (%b)", descr, level, p_s_bh); } - if (p_s_bh->b_size != p_s_sb->s_blocksize) { - reiserfs_panic(p_s_sb, "jmacd-5", "buffer has wrong " + if (p_s_bh->b_size != sb->s_blocksize) { + reiserfs_panic(sb, "jmacd-5", "buffer has wrong " "blocksize %s[%d] (%b)", descr, level, p_s_bh); } - if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) { - reiserfs_panic(p_s_sb, "jmacd-6", "buffer block " + if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(sb)) { + reiserfs_panic(sb, "jmacd-6", "buffer block " "number too high %s[%d] (%b)", descr, level, p_s_bh); } } } #else -static void tb_buffer_sanity_check(struct super_block *p_s_sb, +static void tb_buffer_sanity_check(struct super_block *sb, struct buffer_head *p_s_bh, const char *descr, int level) {; diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 4f787462becc..77f5bb746bf0 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -97,7 +97,7 @@ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int can_dirty(struct reiserfs_journal_cnode *cn); static int journal_join(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks); + struct super_block *sb, unsigned long nblocks); static int release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static int dirty_one_transaction(struct super_block *s, @@ -113,12 +113,12 @@ enum { }; static int do_journal_begin_r(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, + struct super_block *sb, unsigned long nblocks, int join); -static void init_journal_hash(struct super_block *p_s_sb) +static void init_journal_hash(struct super_block *sb) { - struct reiserfs_journal 
*journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); } @@ -145,7 +145,7 @@ static void disable_barrier(struct super_block *s) } static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block - *p_s_sb) + *sb) { struct reiserfs_bitmap_node *bn; static int id; @@ -154,7 +154,7 @@ static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block if (!bn) { return NULL; } - bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS); + bn->data = kzalloc(sb->s_blocksize, GFP_NOFS); if (!bn->data) { kfree(bn); return NULL; @@ -164,9 +164,9 @@ static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block return bn; } -static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb) +static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; struct list_head *entry = journal->j_bitmap_nodes.next; @@ -176,21 +176,21 @@ static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb) if (entry != &journal->j_bitmap_nodes) { bn = list_entry(entry, struct reiserfs_bitmap_node, list); list_del(entry); - memset(bn->data, 0, p_s_sb->s_blocksize); + memset(bn->data, 0, sb->s_blocksize); journal->j_free_bitmap_nodes--; return bn; } - bn = allocate_bitmap_node(p_s_sb); + bn = allocate_bitmap_node(sb); if (!bn) { yield(); goto repeat; } return bn; } -static inline void free_bitmap_node(struct super_block *p_s_sb, +static inline void free_bitmap_node(struct super_block *sb, struct reiserfs_bitmap_node *bn) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); journal->j_used_bitmap_nodes--; if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) { kfree(bn->data); @@ -201,13 +201,13 @@ static inline void free_bitmap_node(struct super_block *p_s_sb, } } -static void allocate_bitmap_nodes(struct super_block *p_s_sb) +static void allocate_bitmap_nodes(struct super_block *sb) { int i; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) { - bn = allocate_bitmap_node(p_s_sb); + bn = allocate_bitmap_node(sb); if (bn) { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; @@ -217,30 +217,30 @@ static void allocate_bitmap_nodes(struct super_block *p_s_sb) } } -static int set_bit_in_list_bitmap(struct super_block *p_s_sb, +static int set_bit_in_list_bitmap(struct super_block *sb, b_blocknr_t block, struct reiserfs_list_bitmap *jb) { - unsigned int bmap_nr = block / (p_s_sb->s_blocksize << 3); - unsigned int bit_nr = block % (p_s_sb->s_blocksize << 3); + unsigned int bmap_nr = block / (sb->s_blocksize << 3); + unsigned int bit_nr = block % (sb->s_blocksize << 3); if (!jb->bitmaps[bmap_nr]) { - jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb); + jb->bitmaps[bmap_nr] = get_bitmap_node(sb); } set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data); return 0; } -static void cleanup_bitmap_list(struct super_block *p_s_sb, +static void cleanup_bitmap_list(struct super_block *sb, struct reiserfs_list_bitmap *jb) { int i; if (jb->bitmaps == NULL) return; - for (i = 0; i < reiserfs_bmap_count(p_s_sb); i++) { + for (i = 0; i < 
reiserfs_bmap_count(sb); i++) { if (jb->bitmaps[i]) { - free_bitmap_node(p_s_sb, jb->bitmaps[i]); + free_bitmap_node(sb, jb->bitmaps[i]); jb->bitmaps[i] = NULL; } } @@ -249,7 +249,7 @@ static void cleanup_bitmap_list(struct super_block *p_s_sb, /* ** only call this on FS unmount. */ -static int free_list_bitmaps(struct super_block *p_s_sb, +static int free_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array) { int i; @@ -257,16 +257,16 @@ static int free_list_bitmaps(struct super_block *p_s_sb, for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; - cleanup_bitmap_list(p_s_sb, jb); + cleanup_bitmap_list(sb, jb); vfree(jb->bitmaps); jb->bitmaps = NULL; } return 0; } -static int free_bitmap_nodes(struct super_block *p_s_sb) +static int free_bitmap_nodes(struct super_block *sb) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct list_head *next = journal->j_bitmap_nodes.next; struct reiserfs_bitmap_node *bn; @@ -286,7 +286,7 @@ static int free_bitmap_nodes(struct super_block *p_s_sb) ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. ** jb_array is the array to be filled in. */ -int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb, +int reiserfs_allocate_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array, unsigned int bmap_nr) { @@ -300,7 +300,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb, jb->journal_list = NULL; jb->bitmaps = vmalloc(mem); if (!jb->bitmaps) { - reiserfs_warning(p_s_sb, "clm-2000", "unable to " + reiserfs_warning(sb, "clm-2000", "unable to " "allocate bitmaps for journal lists"); failed = 1; break; @@ -308,7 +308,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb, memset(jb->bitmaps, 0, mem); } if (failed) { - free_list_bitmaps(p_s_sb, jb_array); + free_list_bitmaps(sb, jb_array); return -1; } return 0; @@ -318,12 +318,12 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb, ** find an available list bitmap. If you can't find one, flush a commit list ** and try again */ -static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb, +static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb, struct reiserfs_journal_list *jl) { int i, j; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_list_bitmap *jb = NULL; for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) { @@ -331,7 +331,7 @@ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb, journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS; jb = journal->j_list_bitmap + i; if (journal->j_list_bitmap[i].journal_list) { - flush_commit_list(p_s_sb, + flush_commit_list(sb, journal->j_list_bitmap[i]. 
journal_list, 1); if (!journal->j_list_bitmap[i].journal_list) { @@ -378,12 +378,12 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) /* ** pulls a cnode off the free list, or returns NULL on failure */ -static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) +static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb) { struct reiserfs_journal_cnode *cn; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); - reiserfs_check_lock_depth(p_s_sb, "get_cnode"); + reiserfs_check_lock_depth(sb, "get_cnode"); if (journal->j_cnode_free <= 0) { return NULL; @@ -405,12 +405,12 @@ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) /* ** returns a cnode to the free list */ -static void free_cnode(struct super_block *p_s_sb, +static void free_cnode(struct super_block *sb, struct reiserfs_journal_cnode *cn) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); - reiserfs_check_lock_depth(p_s_sb, "free_cnode"); + reiserfs_check_lock_depth(sb, "free_cnode"); journal->j_cnode_used--; journal->j_cnode_free++; @@ -481,11 +481,11 @@ static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct ** reject it on the next call to reiserfs_in_journal ** */ -int reiserfs_in_journal(struct super_block *p_s_sb, +int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, int bit_nr, int search_all, b_blocknr_t * next_zero_bit) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn; struct reiserfs_list_bitmap *jb; int i; @@ -493,14 +493,14 @@ int reiserfs_in_journal(struct super_block *p_s_sb, *next_zero_bit = 0; /* always start this at zero. */ - PROC_INFO_INC(p_s_sb, journal.in_journal); + PROC_INFO_INC(sb, journal.in_journal); /* If we aren't doing a search_all, this is a metablock, and it will be logged before use. ** if we crash before the transaction that freed it commits, this transaction won't ** have committed either, and the block will never be written */ if (search_all) { for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { - PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap); + PROC_INFO_INC(sb, journal.in_journal_bitmap); jb = journal->j_list_bitmap + i; if (jb->journal_list && jb->bitmaps[bmap_nr] && test_bit(bit_nr, @@ -510,28 +510,28 @@ int reiserfs_in_journal(struct super_block *p_s_sb, find_next_zero_bit((unsigned long *) (jb->bitmaps[bmap_nr]-> data), - p_s_sb->s_blocksize << 3, + sb->s_blocksize << 3, bit_nr + 1); return 1; } } } - bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr; + bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr; /* is it in any old transactions? */ if (search_all && (cn = - get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) { + get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) { return 1; } /* is it in the current transaction. 
This should never happen */ - if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) { + if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) { BUG(); return 1; } - PROC_INFO_INC(p_s_sb, journal.in_journal_reusable); + PROC_INFO_INC(sb, journal.in_journal_reusable); /* safe for reuse */ return 0; } @@ -553,16 +553,16 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, } /* lock the current transaction */ -static inline void lock_journal(struct super_block *p_s_sb) +static inline void lock_journal(struct super_block *sb) { - PROC_INFO_INC(p_s_sb, journal.lock_journal); - mutex_lock(&SB_JOURNAL(p_s_sb)->j_mutex); + PROC_INFO_INC(sb, journal.lock_journal); + mutex_lock(&SB_JOURNAL(sb)->j_mutex); } /* unlock the current transaction */ -static inline void unlock_journal(struct super_block *p_s_sb) +static inline void unlock_journal(struct super_block *sb) { - mutex_unlock(&SB_JOURNAL(p_s_sb)->j_mutex); + mutex_unlock(&SB_JOURNAL(sb)->j_mutex); } static inline void get_journal_list(struct reiserfs_journal_list *jl) @@ -586,13 +586,13 @@ static inline void put_journal_list(struct super_block *s, ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a ** transaction. */ -static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, +static void cleanup_freed_for_journal_list(struct super_block *sb, struct reiserfs_journal_list *jl) { struct reiserfs_list_bitmap *jb = jl->j_list_bitmap; if (jb) { - cleanup_bitmap_list(p_s_sb, jb); + cleanup_bitmap_list(sb, jb); } jl->j_list_bitmap->journal_list = NULL; jl->j_list_bitmap = NULL; @@ -1237,11 +1237,11 @@ static void remove_journal_hash(struct super_block *, ** journal list for this transaction. Aside from freeing the cnode, this also allows the ** block to be reallocated for data blocks if it had been deleted. */ -static void remove_all_from_journal_list(struct super_block *p_s_sb, +static void remove_all_from_journal_list(struct super_block *sb, struct reiserfs_journal_list *jl, int debug) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn, *last; cn = jl->j_realblock; @@ -1251,18 +1251,18 @@ static void remove_all_from_journal_list(struct super_block *p_s_sb, while (cn) { if (cn->blocknr != 0) { if (debug) { - reiserfs_warning(p_s_sb, "reiserfs-2201", + reiserfs_warning(sb, "reiserfs-2201", "block %u, bh is %d, state %ld", cn->blocknr, cn->bh ? 
1 : 0, cn->state); } cn->state = 0; - remove_journal_hash(p_s_sb, journal->j_list_hash_table, + remove_journal_hash(sb, journal->j_list_hash_table, jl, cn->blocknr, 1); } last = cn; cn = cn->next; - free_cnode(p_s_sb, last); + free_cnode(sb, last); } jl->j_realblock = NULL; } @@ -1274,12 +1274,12 @@ static void remove_all_from_journal_list(struct super_block *p_s_sb, ** called by flush_journal_list, before it calls remove_all_from_journal_list ** */ -static int _update_journal_header_block(struct super_block *p_s_sb, +static int _update_journal_header_block(struct super_block *sb, unsigned long offset, unsigned int trans_id) { struct reiserfs_journal_header *jh; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); if (reiserfs_is_journal_aborted(journal)) return -EIO; @@ -1289,7 +1289,7 @@ static int _update_journal_header_block(struct super_block *p_s_sb, wait_on_buffer((journal->j_header_bh)); if (unlikely(!buffer_uptodate(journal->j_header_bh))) { #ifdef CONFIG_REISERFS_CHECK - reiserfs_warning(p_s_sb, "journal-699", + reiserfs_warning(sb, "journal-699", "buffer write failed"); #endif return -EIO; @@ -1303,24 +1303,24 @@ static int _update_journal_header_block(struct super_block *p_s_sb, jh->j_first_unflushed_offset = cpu_to_le32(offset); jh->j_mount_id = cpu_to_le32(journal->j_mount_id); - if (reiserfs_barrier_flush(p_s_sb)) { + if (reiserfs_barrier_flush(sb)) { int ret; lock_buffer(journal->j_header_bh); ret = submit_barrier_buffer(journal->j_header_bh); if (ret == -EOPNOTSUPP) { set_buffer_uptodate(journal->j_header_bh); - disable_barrier(p_s_sb); + disable_barrier(sb); goto sync; } wait_on_buffer(journal->j_header_bh); - check_barrier_completion(p_s_sb, journal->j_header_bh); + check_barrier_completion(sb, journal->j_header_bh); } else { sync: set_buffer_dirty(journal->j_header_bh); sync_dirty_buffer(journal->j_header_bh); } if (!buffer_uptodate(journal->j_header_bh)) { - reiserfs_warning(p_s_sb, "journal-837", + reiserfs_warning(sb, "journal-837", "IO error during journal replay"); return -EIO; } @@ -1328,23 +1328,23 @@ static int _update_journal_header_block(struct super_block *p_s_sb, return 0; } -static int update_journal_header_block(struct super_block *p_s_sb, +static int update_journal_header_block(struct super_block *sb, unsigned long offset, unsigned int trans_id) { - return _update_journal_header_block(p_s_sb, offset, trans_id); + return _update_journal_header_block(sb, offset, trans_id); } /* ** flush any and all journal lists older than you are ** can only be called from flush_journal_list */ -static int flush_older_journal_lists(struct super_block *p_s_sb, +static int flush_older_journal_lists(struct super_block *sb, struct reiserfs_journal_list *jl) { struct list_head *entry; struct reiserfs_journal_list *other_jl; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); unsigned int trans_id = jl->j_trans_id; /* we know we are the only ones flushing things, no extra race @@ -1359,7 +1359,7 @@ static int flush_older_journal_lists(struct super_block *p_s_sb, if (other_jl->j_trans_id < trans_id) { BUG_ON(other_jl->j_refcount <= 0); /* do not flush all */ - flush_journal_list(p_s_sb, other_jl, 0); + flush_journal_list(sb, other_jl, 0); /* other_jl is now deleted from the list */ goto restart; @@ -1908,22 +1908,22 @@ void remove_journal_hash(struct super_block *sb, } } -static void free_journal_ram(struct super_block *p_s_sb) +static void free_journal_ram(struct 
super_block *sb) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); kfree(journal->j_current_jl); journal->j_num_lists--; vfree(journal->j_cnode_free_orig); - free_list_bitmaps(p_s_sb, journal->j_list_bitmap); - free_bitmap_nodes(p_s_sb); /* must be after free_list_bitmaps */ + free_list_bitmaps(sb, journal->j_list_bitmap); + free_bitmap_nodes(sb); /* must be after free_list_bitmaps */ if (journal->j_header_bh) { brelse(journal->j_header_bh); } /* j_header_bh is on the journal dev, make sure not to release the journal * dev until we brelse j_header_bh */ - release_journal_dev(p_s_sb, journal); + release_journal_dev(sb, journal); vfree(journal); } @@ -1932,27 +1932,27 @@ static void free_journal_ram(struct super_block *p_s_sb) ** of read_super() yet. Any other caller must keep error at 0. */ static int do_journal_release(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, int error) + struct super_block *sb, int error) { struct reiserfs_transaction_handle myth; int flushed = 0; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); /* we only want to flush out transactions if we were called with error == 0 */ - if (!error && !(p_s_sb->s_flags & MS_RDONLY)) { + if (!error && !(sb->s_flags & MS_RDONLY)) { /* end the current trans */ BUG_ON(!th->t_trans_id); - do_journal_end(th, p_s_sb, 10, FLUSH_ALL); + do_journal_end(th, sb, 10, FLUSH_ALL); /* make sure something gets logged to force our way into the flush code */ - if (!journal_join(&myth, p_s_sb, 1)) { - reiserfs_prepare_for_journal(p_s_sb, - SB_BUFFER_WITH_SB(p_s_sb), + if (!journal_join(&myth, sb, 1)) { + reiserfs_prepare_for_journal(sb, + SB_BUFFER_WITH_SB(sb), 1); - journal_mark_dirty(&myth, p_s_sb, - SB_BUFFER_WITH_SB(p_s_sb)); - do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL); + journal_mark_dirty(&myth, sb, + SB_BUFFER_WITH_SB(sb)); + do_journal_end(&myth, sb, 1, FLUSH_ALL); flushed = 1; } } @@ -1960,26 +1960,26 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, /* this also catches errors during the do_journal_end above */ if (!error && reiserfs_is_journal_aborted(journal)) { memset(&myth, 0, sizeof(myth)); - if (!journal_join_abort(&myth, p_s_sb, 1)) { - reiserfs_prepare_for_journal(p_s_sb, - SB_BUFFER_WITH_SB(p_s_sb), + if (!journal_join_abort(&myth, sb, 1)) { + reiserfs_prepare_for_journal(sb, + SB_BUFFER_WITH_SB(sb), 1); - journal_mark_dirty(&myth, p_s_sb, - SB_BUFFER_WITH_SB(p_s_sb)); - do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL); + journal_mark_dirty(&myth, sb, + SB_BUFFER_WITH_SB(sb)); + do_journal_end(&myth, sb, 1, FLUSH_ALL); } } reiserfs_mounted_fs_count--; /* wait for all commits to finish */ - cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work); + cancel_delayed_work(&SB_JOURNAL(sb)->j_work); flush_workqueue(commit_wq); if (!reiserfs_mounted_fs_count) { destroy_workqueue(commit_wq); commit_wq = NULL; } - free_journal_ram(p_s_sb); + free_journal_ram(sb); return 0; } @@ -1988,28 +1988,28 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, ** call on unmount. flush all journal trans, release all alloc'd ram */ int journal_release(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb) + struct super_block *sb) { - return do_journal_release(th, p_s_sb, 0); + return do_journal_release(th, sb, 0); } /* ** only call from an error condition inside reiserfs_read_super! 
*/ int journal_release_error(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb) + struct super_block *sb) { - return do_journal_release(th, p_s_sb, 1); + return do_journal_release(th, sb, 1); } /* compares description block with commit block. returns 1 if they differ, 0 if they are the same */ -static int journal_compare_desc_commit(struct super_block *p_s_sb, +static int journal_compare_desc_commit(struct super_block *sb, struct reiserfs_journal_desc *desc, struct reiserfs_journal_commit *commit) { if (get_commit_trans_id(commit) != get_desc_trans_id(desc) || get_commit_trans_len(commit) != get_desc_trans_len(desc) || - get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max || + get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max || get_commit_trans_len(commit) <= 0) { return 1; } @@ -2020,7 +2020,7 @@ static int journal_compare_desc_commit(struct super_block *p_s_sb, ** returns -1 if it found a corrupt commit block ** returns 1 if both desc and commit were valid */ -static int journal_transaction_is_valid(struct super_block *p_s_sb, +static int journal_transaction_is_valid(struct super_block *sb, struct buffer_head *d_bh, unsigned int *oldest_invalid_trans_id, unsigned long *newest_mount_id) @@ -2038,7 +2038,7 @@ static int journal_transaction_is_valid(struct super_block *p_s_sb, && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) { if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-986: transaction " "is valid returning because trans_id %d is greater than " "oldest_invalid %lu", @@ -2048,7 +2048,7 @@ static int journal_transaction_is_valid(struct super_block *p_s_sb, } if (newest_mount_id && *newest_mount_id > get_desc_mount_id(desc)) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1087: transaction " "is valid returning because mount_id %d is less than " "newest_mount_id %lu", @@ -2056,37 +2056,37 @@ static int journal_transaction_is_valid(struct super_block *p_s_sb, *newest_mount_id); return -1; } - if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) { - reiserfs_warning(p_s_sb, "journal-2018", + if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) { + reiserfs_warning(sb, "journal-2018", "Bad transaction length %d " "encountered, ignoring transaction", get_desc_trans_len(desc)); return -1; } - offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); + offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb); /* ok, we have a journal description block, lets see if the transaction was valid */ c_bh = - journal_bread(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + journal_bread(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((offset + get_desc_trans_len(desc) + - 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))); + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); if (!c_bh) return 0; commit = (struct reiserfs_journal_commit *)c_bh->b_data; - if (journal_compare_desc_commit(p_s_sb, desc, commit)) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + if (journal_compare_desc_commit(sb, desc, commit)) { + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal_transaction_is_valid, commit offset %ld had bad " "time %d or length %d", c_bh->b_blocknr - - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), + SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_commit_trans_id(commit), get_commit_trans_len(commit)); brelse(c_bh); if (oldest_invalid_trans_id) { *oldest_invalid_trans_id 
= get_desc_trans_id(desc); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1004: " "transaction_is_valid setting oldest invalid trans_id " "to %d", @@ -2095,11 +2095,11 @@ static int journal_transaction_is_valid(struct super_block *p_s_sb, return -1; } brelse(c_bh); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1006: found valid " "transaction start offset %llu, len %d id %d", d_bh->b_blocknr - - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), + SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_desc_trans_len(desc), get_desc_trans_id(desc)); return 1; @@ -2121,13 +2121,13 @@ static void brelse_array(struct buffer_head **heads, int num) ** this either reads in a replays a transaction, or returns because the transaction ** is invalid, or too old. */ -static int journal_read_transaction(struct super_block *p_s_sb, +static int journal_read_transaction(struct super_block *sb, unsigned long cur_dblock, unsigned long oldest_start, unsigned int oldest_trans_id, unsigned long newest_mount_id) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; unsigned int trans_id = 0; @@ -2139,45 +2139,45 @@ static int journal_read_transaction(struct super_block *p_s_sb, int i; int trans_half; - d_bh = journal_bread(p_s_sb, cur_dblock); + d_bh = journal_bread(sb, cur_dblock); if (!d_bh) return 1; desc = (struct reiserfs_journal_desc *)d_bh->b_data; - trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: " + trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb); + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: " "journal_read_transaction, offset %llu, len %d mount_id %d", - d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), + d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_desc_trans_len(desc), get_desc_mount_id(desc)); if (get_desc_trans_id(desc) < oldest_trans_id) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: " + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: " "journal_read_trans skipping because %lu is too old", cur_dblock - - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)); + SB_ONDISK_JOURNAL_1st_BLOCK(sb)); brelse(d_bh); return 1; } if (get_desc_mount_id(desc) != newest_mount_id) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: " + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: " "journal_read_trans skipping because %d is != " "newest_mount_id %lu", get_desc_mount_id(desc), newest_mount_id); brelse(d_bh); return 1; } - c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((trans_offset + get_desc_trans_len(desc) + 1) % - SB_ONDISK_JOURNAL_SIZE(p_s_sb))); + SB_ONDISK_JOURNAL_SIZE(sb))); if (!c_bh) { brelse(d_bh); return 1; } commit = (struct reiserfs_journal_commit *)c_bh->b_data; - if (journal_compare_desc_commit(p_s_sb, desc, commit)) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + if (journal_compare_desc_commit(sb, desc, commit)) { + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal_read_transaction, " "commit offset %llu had bad time %d or length %d", c_bh->b_blocknr - - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), + SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_commit_trans_id(commit), get_commit_trans_len(commit)); brelse(c_bh); @@ -2195,30 +2195,30 @@ static int journal_read_transaction(struct 
super_block *p_s_sb, brelse(d_bh); kfree(log_blocks); kfree(real_blocks); - reiserfs_warning(p_s_sb, "journal-1169", + reiserfs_warning(sb, "journal-1169", "kmalloc failed, unable to mount FS"); return -1; } /* get all the buffer heads */ - trans_half = journal_trans_half(p_s_sb->s_blocksize); + trans_half = journal_trans_half(sb->s_blocksize); for (i = 0; i < get_desc_trans_len(desc); i++) { log_blocks[i] = - journal_getblk(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + journal_getblk(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + (trans_offset + 1 + - i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)); + i) % SB_ONDISK_JOURNAL_SIZE(sb)); if (i < trans_half) { real_blocks[i] = - sb_getblk(p_s_sb, + sb_getblk(sb, le32_to_cpu(desc->j_realblock[i])); } else { real_blocks[i] = - sb_getblk(p_s_sb, + sb_getblk(sb, le32_to_cpu(commit-> j_realblock[i - trans_half])); } - if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) { - reiserfs_warning(p_s_sb, "journal-1207", + if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) { + reiserfs_warning(sb, "journal-1207", "REPLAY FAILURE fsck required! " "Block to replay is outside of " "filesystem"); @@ -2226,8 +2226,8 @@ static int journal_read_transaction(struct super_block *p_s_sb, } /* make sure we don't try to replay onto log or reserved area */ if (is_block_in_log_or_reserved_area - (p_s_sb, real_blocks[i]->b_blocknr)) { - reiserfs_warning(p_s_sb, "journal-1204", + (sb, real_blocks[i]->b_blocknr)) { + reiserfs_warning(sb, "journal-1204", "REPLAY FAILURE fsck required! " "Trying to replay onto a log block"); abort_replay: @@ -2245,7 +2245,7 @@ static int journal_read_transaction(struct super_block *p_s_sb, for (i = 0; i < get_desc_trans_len(desc); i++) { wait_on_buffer(log_blocks[i]); if (!buffer_uptodate(log_blocks[i])) { - reiserfs_warning(p_s_sb, "journal-1212", + reiserfs_warning(sb, "journal-1212", "REPLAY FAILURE fsck required! " "buffer write failed"); brelse_array(log_blocks + i, @@ -2270,7 +2270,7 @@ static int journal_read_transaction(struct super_block *p_s_sb, for (i = 0; i < get_desc_trans_len(desc); i++) { wait_on_buffer(real_blocks[i]); if (!buffer_uptodate(real_blocks[i])) { - reiserfs_warning(p_s_sb, "journal-1226", + reiserfs_warning(sb, "journal-1226", "REPLAY FAILURE, fsck required! " "buffer write failed"); brelse_array(real_blocks + i, @@ -2284,15 +2284,15 @@ static int journal_read_transaction(struct super_block *p_s_sb, brelse(real_blocks[i]); } cur_dblock = - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((trans_offset + get_desc_trans_len(desc) + - 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + 2) % SB_ONDISK_JOURNAL_SIZE(sb)); + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal " "start to offset %ld", - cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)); + cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb)); /* init starting values for the first transaction, in case this is the last transaction to be replayed. */ - journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); + journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb); journal->j_last_flush_trans_id = trans_id; journal->j_trans_id = trans_id + 1; /* check for trans_id overflow */ @@ -2357,9 +2357,9 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev, ** ** On exit, it sets things up so the first transaction will work correctly. 
*/ -static int journal_read(struct super_block *p_s_sb) +static int journal_read(struct super_block *sb) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_desc *desc; unsigned int oldest_trans_id = 0; unsigned int oldest_invalid_trans_id = 0; @@ -2375,8 +2375,8 @@ static int journal_read(struct super_block *p_s_sb) int ret; char b[BDEVNAME_SIZE]; - cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb); - reiserfs_info(p_s_sb, "checking transaction log (%s)\n", + cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb); + reiserfs_info(sb, "checking transaction log (%s)\n", bdevname(journal->j_dev_bd, b)); start = get_seconds(); @@ -2384,22 +2384,22 @@ static int journal_read(struct super_block *p_s_sb) ** is the first unflushed, and if that transaction is not valid, ** replay is done */ - journal->j_header_bh = journal_bread(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) - + SB_ONDISK_JOURNAL_SIZE(p_s_sb)); + journal->j_header_bh = journal_bread(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + + SB_ONDISK_JOURNAL_SIZE(sb)); if (!journal->j_header_bh) { return 1; } jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data); if (le32_to_cpu(jh->j_first_unflushed_offset) < - SB_ONDISK_JOURNAL_SIZE(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(sb) && le32_to_cpu(jh->j_last_flush_trans_id) > 0) { oldest_start = - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + le32_to_cpu(jh->j_first_unflushed_offset); oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1; newest_mount_id = le32_to_cpu(jh->j_mount_id); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1153: found in " "header: first_unflushed_offset %d, last_flushed_trans_id " "%lu", le32_to_cpu(jh->j_first_unflushed_offset), @@ -2411,10 +2411,10 @@ static int journal_read(struct super_block *p_s_sb) ** through the whole log. 
*/ d_bh = - journal_bread(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + journal_bread(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + le32_to_cpu(jh->j_first_unflushed_offset)); - ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL); + ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL); if (!ret) { continue_replay = 0; } @@ -2422,8 +2422,8 @@ static int journal_read(struct super_block *p_s_sb) goto start_log_replay; } - if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) { - reiserfs_warning(p_s_sb, "clm-2076", + if (continue_replay && bdev_read_only(sb->s_bdev)) { + reiserfs_warning(sb, "clm-2076", "device is readonly, unable to replay log"); return -1; } @@ -2433,17 +2433,17 @@ static int journal_read(struct super_block *p_s_sb) */ while (continue_replay && cur_dblock < - (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + - SB_ONDISK_JOURNAL_SIZE(p_s_sb))) { + (SB_ONDISK_JOURNAL_1st_BLOCK(sb) + + SB_ONDISK_JOURNAL_SIZE(sb))) { /* Note that it is required for blocksize of primary fs device and journal device to be the same */ d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, - p_s_sb->s_blocksize, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + - SB_ONDISK_JOURNAL_SIZE(p_s_sb)); + sb->s_blocksize, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + + SB_ONDISK_JOURNAL_SIZE(sb)); ret = - journal_transaction_is_valid(p_s_sb, d_bh, + journal_transaction_is_valid(sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id); if (ret == 1) { @@ -2452,26 +2452,26 @@ static int journal_read(struct super_block *p_s_sb) oldest_trans_id = get_desc_trans_id(desc); oldest_start = d_bh->b_blocknr; newest_mount_id = get_desc_mount_id(desc); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1179: Setting " "oldest_start to offset %llu, trans_id %lu", oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK - (p_s_sb), oldest_trans_id); + (sb), oldest_trans_id); } else if (oldest_trans_id > get_desc_trans_id(desc)) { /* one we just read was older */ oldest_trans_id = get_desc_trans_id(desc); oldest_start = d_bh->b_blocknr; - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting " "oldest_start to offset %lu, trans_id %lu", oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK - (p_s_sb), oldest_trans_id); + (sb), oldest_trans_id); } if (newest_mount_id < get_desc_mount_id(desc)) { newest_mount_id = get_desc_mount_id(desc); - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " "newest_mount_id to %d", get_desc_mount_id(desc)); @@ -2486,17 +2486,17 @@ static int journal_read(struct super_block *p_s_sb) start_log_replay: cur_dblock = oldest_start; if (oldest_trans_id) { - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay " "from offset %llu, trans_id %lu", - cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), + cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb), oldest_trans_id); } replay_count = 0; while (continue_replay && oldest_trans_id > 0) { ret = - journal_read_transaction(p_s_sb, cur_dblock, oldest_start, + journal_read_transaction(sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id); if (ret < 0) { return ret; @@ -2504,14 +2504,14 @@ static int journal_read(struct super_block *p_s_sb) break; } cur_dblock = - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start; + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start; replay_count++; if (cur_dblock == oldest_start) break; } if (oldest_trans_id == 0) { - 
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1225: No valid " "transactions found"); } /* j_start does not get set correctly if we don't replay any transactions. @@ -2531,16 +2531,16 @@ static int journal_read(struct super_block *p_s_sb) } else { journal->j_mount_id = newest_mount_id + 1; } - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " "newest_mount_id to %lu", journal->j_mount_id); journal->j_first_unflushed_offset = journal->j_start; if (replay_count > 0) { - reiserfs_info(p_s_sb, + reiserfs_info(sb, "replayed %d transactions in %lu seconds\n", replay_count, get_seconds() - start); } - if (!bdev_read_only(p_s_sb->s_bdev) && - _update_journal_header_block(p_s_sb, journal->j_start, + if (!bdev_read_only(sb->s_bdev) && + _update_journal_header_block(sb, journal->j_start, journal->j_last_flush_trans_id)) { /* replay failed, caller must call free_journal_ram and abort ** the mount @@ -2565,9 +2565,9 @@ static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s) return jl; } -static void journal_list_init(struct super_block *p_s_sb) +static void journal_list_init(struct super_block *sb) { - SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb); + SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb); } static int release_journal_dev(struct super_block *super, @@ -2666,28 +2666,28 @@ static int journal_init_dev(struct super_block *super, */ #define REISERFS_STANDARD_BLKSIZE (4096) -static int check_advise_trans_params(struct super_block *p_s_sb, +static int check_advise_trans_params(struct super_block *sb, struct reiserfs_journal *journal) { if (journal->j_trans_max) { /* Non-default journal params. Do sanity check for them. */ int ratio = 1; - if (p_s_sb->s_blocksize < REISERFS_STANDARD_BLKSIZE) - ratio = REISERFS_STANDARD_BLKSIZE / p_s_sb->s_blocksize; + if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE) + ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize; if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio || journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio || - SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max < + SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max < JOURNAL_MIN_RATIO) { - reiserfs_warning(p_s_sb, "sh-462", + reiserfs_warning(sb, "sh-462", "bad transaction max size (%u). " "FSCK?", journal->j_trans_max); return 1; } if (journal->j_max_batch != (journal->j_trans_max) * JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) { - reiserfs_warning(p_s_sb, "sh-463", + reiserfs_warning(sb, "sh-463", "bad transaction max batch (%u). " "FSCK?", journal->j_max_batch); return 1; @@ -2697,9 +2697,9 @@ static int check_advise_trans_params(struct super_block *p_s_sb, The file system was created by old version of mkreiserfs, so some fields contain zeros, and we need to advise proper values for them */ - if (p_s_sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) { - reiserfs_warning(p_s_sb, "sh-464", "bad blocksize (%u)", - p_s_sb->s_blocksize); + if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) { + reiserfs_warning(sb, "sh-464", "bad blocksize (%u)", + sb->s_blocksize); return 1; } journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT; @@ -2712,10 +2712,10 @@ static int check_advise_trans_params(struct super_block *p_s_sb, /* ** must be called once on fs mount. 
calls journal_read for you */ -int journal_init(struct super_block *p_s_sb, const char *j_dev_name, +int journal_init(struct super_block *sb, const char *j_dev_name, int old_format, unsigned int commit_max_age) { - int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2; + int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2; struct buffer_head *bhjh; struct reiserfs_super_block *rs; struct reiserfs_journal_header *jh; @@ -2723,9 +2723,9 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, struct reiserfs_journal_list *jl; char b[BDEVNAME_SIZE]; - journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal)); + journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal)); if (!journal) { - reiserfs_warning(p_s_sb, "journal-1256", + reiserfs_warning(sb, "journal-1256", "unable to get memory for journal structure"); return 1; } @@ -2735,50 +2735,50 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, INIT_LIST_HEAD(&journal->j_working_list); INIT_LIST_HEAD(&journal->j_journal_list); journal->j_persistent_trans = 0; - if (reiserfs_allocate_list_bitmaps(p_s_sb, + if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap, - reiserfs_bmap_count(p_s_sb))) + reiserfs_bmap_count(sb))) goto free_and_return; - allocate_bitmap_nodes(p_s_sb); + allocate_bitmap_nodes(sb); /* reserved for journal area support */ - SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ? + SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ? REISERFS_OLD_DISK_OFFSET_IN_BYTES - / p_s_sb->s_blocksize + - reiserfs_bmap_count(p_s_sb) + + / sb->s_blocksize + + reiserfs_bmap_count(sb) + 1 : REISERFS_DISK_OFFSET_IN_BYTES / - p_s_sb->s_blocksize + 2); + sb->s_blocksize + 2); /* Sanity check to see is the standard journal fitting withing first bitmap (actual for small blocksizes) */ - if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) && - (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + - SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) { - reiserfs_warning(p_s_sb, "journal-1393", + if (!SB_ONDISK_JOURNAL_DEVICE(sb) && + (SB_JOURNAL_1st_RESERVED_BLOCK(sb) + + SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) { + reiserfs_warning(sb, "journal-1393", "journal does not fit for area addressed " "by first of bitmap blocks. It starts at " "%u and its size is %u. 
Block size %ld", - SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb), - SB_ONDISK_JOURNAL_SIZE(p_s_sb), - p_s_sb->s_blocksize); + SB_JOURNAL_1st_RESERVED_BLOCK(sb), + SB_ONDISK_JOURNAL_SIZE(sb), + sb->s_blocksize); goto free_and_return; } - if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) { - reiserfs_warning(p_s_sb, "sh-462", + if (journal_init_dev(sb, journal, j_dev_name) != 0) { + reiserfs_warning(sb, "sh-462", "unable to initialize jornal device"); goto free_and_return; } - rs = SB_DISK_SUPER_BLOCK(p_s_sb); + rs = SB_DISK_SUPER_BLOCK(sb); /* read journal header */ - bhjh = journal_bread(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + - SB_ONDISK_JOURNAL_SIZE(p_s_sb)); + bhjh = journal_bread(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + + SB_ONDISK_JOURNAL_SIZE(sb)); if (!bhjh) { - reiserfs_warning(p_s_sb, "sh-459", + reiserfs_warning(sb, "sh-459", "unable to read journal header"); goto free_and_return; } @@ -2788,7 +2788,7 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) { - reiserfs_warning(p_s_sb, "sh-460", + reiserfs_warning(sb, "sh-460", "journal header magic %x (device %s) does " "not match to magic found in super block %x", jh->jh_journal.jp_journal_magic, @@ -2804,7 +2804,7 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age); journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; - if (check_advise_trans_params(p_s_sb, journal) != 0) + if (check_advise_trans_params(sb, journal) != 0) goto free_and_return; journal->j_default_max_commit_age = journal->j_max_commit_age; @@ -2813,12 +2813,12 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, journal->j_max_trans_age = commit_max_age; } - reiserfs_info(p_s_sb, "journal params: device %s, size %u, " + reiserfs_info(sb, "journal params: device %s, size %u, " "journal first block %u, max trans len %u, max batch %u, " "max commit age %u, max trans age %u\n", bdevname(journal->j_dev_bd, b), - SB_ONDISK_JOURNAL_SIZE(p_s_sb), - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), + SB_ONDISK_JOURNAL_SIZE(sb), + SB_ONDISK_JOURNAL_1st_BLOCK(sb), journal->j_trans_max, journal->j_max_batch, journal->j_max_commit_age, journal->j_max_trans_age); @@ -2826,7 +2826,7 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, brelse(bhjh); journal->j_list_bitmap_index = 0; - journal_list_init(p_s_sb); + journal_list_init(sb); memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); @@ -2858,7 +2858,7 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, journal->j_must_wait = 0; if (journal->j_cnode_free == 0) { - reiserfs_warning(p_s_sb, "journal-2004", "Journal cnode memory " + reiserfs_warning(sb, "journal-2004", "Journal cnode memory " "allocation failed (%ld bytes). Journal is " "too large for available memory. 
Usually " "this is due to a journal that is too large.", @@ -2866,16 +2866,16 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, goto free_and_return; } - init_journal_hash(p_s_sb); + init_journal_hash(sb); jl = journal->j_current_jl; - jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl); + jl->j_list_bitmap = get_list_bitmap(sb, jl); if (!jl->j_list_bitmap) { - reiserfs_warning(p_s_sb, "journal-2005", + reiserfs_warning(sb, "journal-2005", "get_list_bitmap failed for journal list 0"); goto free_and_return; } - if (journal_read(p_s_sb) < 0) { - reiserfs_warning(p_s_sb, "reiserfs-2006", + if (journal_read(sb) < 0) { + reiserfs_warning(sb, "reiserfs-2006", "Replay Failure, unable to mount"); goto free_and_return; } @@ -2885,10 +2885,10 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, commit_wq = create_workqueue("reiserfs"); INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); - journal->j_work_sb = p_s_sb; + journal->j_work_sb = sb; return 0; free_and_return: - free_journal_ram(p_s_sb); + free_journal_ram(sb); return 1; } @@ -3004,37 +3004,37 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id) ** expect to use in nblocks. */ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks, + struct super_block *sb, unsigned long nblocks, int join) { time_t now = get_seconds(); unsigned int old_trans_id; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_transaction_handle myth; int sched_count = 0; int retval; - reiserfs_check_lock_depth(p_s_sb, "journal_begin"); + reiserfs_check_lock_depth(sb, "journal_begin"); BUG_ON(nblocks > journal->j_trans_max); - PROC_INFO_INC(p_s_sb, journal.journal_being); + PROC_INFO_INC(sb, journal.journal_being); /* set here for journal_join */ th->t_refcount = 1; - th->t_super = p_s_sb; + th->t_super = sb; relock: - lock_journal(p_s_sb); + lock_journal(sb); if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) { - unlock_journal(p_s_sb); + unlock_journal(sb); retval = journal->j_errno; goto out_fail; } journal->j_bcount++; if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) { - unlock_journal(p_s_sb); - reiserfs_wait_on_write_block(p_s_sb); - PROC_INFO_INC(p_s_sb, journal.journal_relock_writers); + unlock_journal(sb); + reiserfs_wait_on_write_block(sb); + PROC_INFO_INC(sb, journal.journal_relock_writers); goto relock; } now = get_seconds(); @@ -3055,7 +3055,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) { old_trans_id = journal->j_trans_id; - unlock_journal(p_s_sb); /* allow others to finish this transaction */ + unlock_journal(sb); /* allow others to finish this transaction */ if (!join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch && @@ -3063,7 +3063,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, (journal->j_len_alloc * 75)) { if (atomic_read(&journal->j_wcount) > 10) { sched_count++; - queue_log_writer(p_s_sb); + queue_log_writer(sb); goto relock; } } @@ -3073,25 +3073,25 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, if (atomic_read(&journal->j_jlock)) { while (journal->j_trans_id == old_trans_id && atomic_read(&journal->j_jlock)) { - queue_log_writer(p_s_sb); + queue_log_writer(sb); } goto relock; } - retval = journal_join(&myth, p_s_sb, 1); + retval = journal_join(&myth, sb, 
1); if (retval) goto out_fail; /* someone might have ended the transaction while we joined */ if (old_trans_id != journal->j_trans_id) { - retval = do_journal_end(&myth, p_s_sb, 1, 0); + retval = do_journal_end(&myth, sb, 1, 0); } else { - retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW); + retval = do_journal_end(&myth, sb, 1, COMMIT_NOW); } if (retval) goto out_fail; - PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount); + PROC_INFO_INC(sb, journal.journal_relock_wcount); goto relock; } /* we are the first writer, set trans_id */ @@ -3103,7 +3103,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, th->t_blocks_logged = 0; th->t_blocks_allocated = nblocks; th->t_trans_id = journal->j_trans_id; - unlock_journal(p_s_sb); + unlock_journal(sb); INIT_LIST_HEAD(&th->t_list); get_fs_excl(); return 0; @@ -3113,7 +3113,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, /* Re-set th->t_super, so we can properly keep track of how many * persistent transactions there are. We need to do this so if this * call is part of a failed restart_transaction, we can free it later */ - th->t_super = p_s_sb; + th->t_super = sb; return retval; } @@ -3164,7 +3164,7 @@ int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) } static int journal_join(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks) + struct super_block *sb, unsigned long nblocks) { struct reiserfs_transaction_handle *cur_th = current->journal_info; @@ -3173,11 +3173,11 @@ static int journal_join(struct reiserfs_transaction_handle *th, */ th->t_handle_save = cur_th; BUG_ON(cur_th && cur_th->t_refcount > 1); - return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN); + return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN); } int journal_join_abort(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks) + struct super_block *sb, unsigned long nblocks) { struct reiserfs_transaction_handle *cur_th = current->journal_info; @@ -3186,11 +3186,11 @@ int journal_join_abort(struct reiserfs_transaction_handle *th, */ th->t_handle_save = cur_th; BUG_ON(cur_th && cur_th->t_refcount > 1); - return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT); + return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT); } int journal_begin(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks) + struct super_block *sb, unsigned long nblocks) { struct reiserfs_transaction_handle *cur_th = current->journal_info; int ret; @@ -3198,12 +3198,12 @@ int journal_begin(struct reiserfs_transaction_handle *th, th->t_handle_save = NULL; if (cur_th) { /* we are nesting into the current transaction */ - if (cur_th->t_super == p_s_sb) { + if (cur_th->t_super == sb) { BUG_ON(!cur_th->t_refcount); cur_th->t_refcount++; memcpy(th, cur_th, sizeof(*th)); if (th->t_refcount <= 1) - reiserfs_warning(p_s_sb, "reiserfs-2005", + reiserfs_warning(sb, "reiserfs-2005", "BAD: refcount <= 1, but " "journal_info != 0"); return 0; @@ -3212,7 +3212,7 @@ int journal_begin(struct reiserfs_transaction_handle *th, ** save it and restore on journal_end. This should never ** really happen... 
*/ - reiserfs_warning(p_s_sb, "clm-2100", + reiserfs_warning(sb, "clm-2100", "nesting info a different FS"); th->t_handle_save = current->journal_info; current->journal_info = th; @@ -3220,7 +3220,7 @@ int journal_begin(struct reiserfs_transaction_handle *th, } else { current->journal_info = th; } - ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG); + ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG); BUG_ON(current->journal_info != th); /* I guess this boils down to being the reciprocal of clm-2100 above. @@ -3244,28 +3244,28 @@ int journal_begin(struct reiserfs_transaction_handle *th, ** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len. */ int journal_mark_dirty(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, struct buffer_head *bh) + struct super_block *sb, struct buffer_head *bh) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn = NULL; int count_already_incd = 0; int prepared = 0; BUG_ON(!th->t_trans_id); - PROC_INFO_INC(p_s_sb, journal.mark_dirty); + PROC_INFO_INC(sb, journal.mark_dirty); if (th->t_trans_id != journal->j_trans_id) { reiserfs_panic(th->t_super, "journal-1577", "handle trans id %ld != current trans id %ld", th->t_trans_id, journal->j_trans_id); } - p_s_sb->s_dirt = 1; + sb->s_dirt = 1; prepared = test_clear_buffer_journal_prepared(bh); clear_buffer_journal_restore_dirty(bh); /* already in this transaction, we are done */ if (buffer_journaled(bh)) { - PROC_INFO_INC(p_s_sb, journal.mark_dirty_already); + PROC_INFO_INC(sb, journal.mark_dirty_already); return 0; } @@ -3274,7 +3274,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, ** could get to disk too early. NOT GOOD. 
*/ if (!prepared || buffer_dirty(bh)) { - reiserfs_warning(p_s_sb, "journal-1777", + reiserfs_warning(sb, "journal-1777", "buffer %llu bad state " "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT", (unsigned long long)bh->b_blocknr, @@ -3285,7 +3285,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, } if (atomic_read(&(journal->j_wcount)) <= 0) { - reiserfs_warning(p_s_sb, "journal-1409", + reiserfs_warning(sb, "journal-1409", "returning because j_wcount was %d", atomic_read(&(journal->j_wcount))); return 1; @@ -3301,7 +3301,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, if (buffer_journal_dirty(bh)) { count_already_incd = 1; - PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal); + PROC_INFO_INC(sb, journal.mark_dirty_notjournal); clear_buffer_journal_dirty(bh); } @@ -3313,10 +3313,9 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, /* now put this guy on the end */ if (!cn) { - cn = get_cnode(p_s_sb); + cn = get_cnode(sb); if (!cn) { - reiserfs_panic(p_s_sb, "journal-4", - "get_cnode failed!"); + reiserfs_panic(sb, "journal-4", "get_cnode failed!"); } if (th->t_blocks_logged == th->t_blocks_allocated) { @@ -3328,7 +3327,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, cn->bh = bh; cn->blocknr = bh->b_blocknr; - cn->sb = p_s_sb; + cn->sb = sb; cn->jlist = NULL; insert_journal_hash(journal->j_hash_table, cn); if (!count_already_incd) { @@ -3349,10 +3348,10 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, } int journal_end(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks) + struct super_block *sb, unsigned long nblocks) { if (!current->journal_info && th->t_refcount > 1) - reiserfs_warning(p_s_sb, "REISER-NESTING", + reiserfs_warning(sb, "REISER-NESTING", "th NULL, refcount %d", th->t_refcount); if (!th->t_trans_id) { @@ -3376,7 +3375,7 @@ int journal_end(struct reiserfs_transaction_handle *th, } return 0; } else { - return do_journal_end(th, p_s_sb, nblocks, 0); + return do_journal_end(th, sb, nblocks, 0); } } @@ -3387,15 +3386,15 @@ int journal_end(struct reiserfs_transaction_handle *th, ** ** returns 1 if it cleaned and relsed the buffer. 
0 otherwise */ -static int remove_from_transaction(struct super_block *p_s_sb, +static int remove_from_transaction(struct super_block *sb, b_blocknr_t blocknr, int already_cleaned) { struct buffer_head *bh; struct reiserfs_journal_cnode *cn; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); int ret = 0; - cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr); + cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr); if (!cn || !cn->bh) { return ret; } @@ -3413,7 +3412,7 @@ static int remove_from_transaction(struct super_block *p_s_sb, journal->j_last = cn->prev; } if (bh) - remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, + remove_journal_hash(sb, journal->j_hash_table, NULL, bh->b_blocknr, 0); clear_buffer_journaled(bh); /* don't log this one */ @@ -3423,14 +3422,14 @@ static int remove_from_transaction(struct super_block *p_s_sb, clear_buffer_journal_test(bh); put_bh(bh); if (atomic_read(&(bh->b_count)) < 0) { - reiserfs_warning(p_s_sb, "journal-1752", + reiserfs_warning(sb, "journal-1752", "b_count < 0"); } ret = 1; } journal->j_len--; journal->j_len_alloc--; - free_cnode(p_s_sb, cn); + free_cnode(sb, cn); return ret; } @@ -3481,19 +3480,19 @@ static int can_dirty(struct reiserfs_journal_cnode *cn) ** will wait until the current transaction is done/committed before returning */ int journal_end_sync(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks) + struct super_block *sb, unsigned long nblocks) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); BUG_ON(!th->t_trans_id); /* you can sync while nested, very, very bad */ BUG_ON(th->t_refcount > 1); if (journal->j_len == 0) { - reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), + reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); - journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)); + journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb)); } - return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT); + return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT); } /* @@ -3503,7 +3502,7 @@ static void flush_async_commits(struct work_struct *work) { struct reiserfs_journal *journal = container_of(work, struct reiserfs_journal, j_work.work); - struct super_block *p_s_sb = journal->j_work_sb; + struct super_block *sb = journal->j_work_sb; struct reiserfs_journal_list *jl; struct list_head *entry; @@ -3512,7 +3511,7 @@ static void flush_async_commits(struct work_struct *work) /* last entry is the youngest, commit it and you get everything */ entry = journal->j_journal_list.prev; jl = JOURNAL_LIST_ENTRY(entry); - flush_commit_list(p_s_sb, jl, 1); + flush_commit_list(sb, jl, 1); } unlock_kernel(); } @@ -3521,11 +3520,11 @@ static void flush_async_commits(struct work_struct *work) ** flushes any old transactions to disk ** ends the current transaction if it is too old */ -int reiserfs_flush_old_commits(struct super_block *p_s_sb) +int reiserfs_flush_old_commits(struct super_block *sb) { time_t now; struct reiserfs_transaction_handle th; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); now = get_seconds(); /* safety check so we don't flush while we are replaying the log during @@ -3542,20 +3541,20 @@ int reiserfs_flush_old_commits(struct super_block *p_s_sb) journal->j_trans_start_time > 0 && journal->j_len > 0 && (now - journal->j_trans_start_time) > journal->j_max_trans_age) 
{ - if (!journal_join(&th, p_s_sb, 1)) { - reiserfs_prepare_for_journal(p_s_sb, - SB_BUFFER_WITH_SB(p_s_sb), + if (!journal_join(&th, sb, 1)) { + reiserfs_prepare_for_journal(sb, + SB_BUFFER_WITH_SB(sb), 1); - journal_mark_dirty(&th, p_s_sb, - SB_BUFFER_WITH_SB(p_s_sb)); + journal_mark_dirty(&th, sb, + SB_BUFFER_WITH_SB(sb)); /* we're only being called from kreiserfsd, it makes no sense to do ** an async commit so that kreiserfsd can do it later */ - do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT); + do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT); } } - return p_s_sb->s_dirt; + return sb->s_dirt; } /* @@ -3570,7 +3569,7 @@ int reiserfs_flush_old_commits(struct super_block *p_s_sb) ** Note, we can't allow the journal_end to proceed while there are still writers in the log. */ static int check_journal_end(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks, + struct super_block *sb, unsigned long nblocks, int flags) { @@ -3579,7 +3578,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, int commit_now = flags & COMMIT_NOW; int wait_on_commit = flags & WAIT; struct reiserfs_journal_list *jl; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); BUG_ON(!th->t_trans_id); @@ -3618,31 +3617,31 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, if (flush) { journal->j_next_full_flush = 1; } - unlock_journal(p_s_sb); + unlock_journal(sb); /* sleep while the current transaction is still j_jlocked */ while (journal->j_trans_id == trans_id) { if (atomic_read(&journal->j_jlock)) { - queue_log_writer(p_s_sb); + queue_log_writer(sb); } else { - lock_journal(p_s_sb); + lock_journal(sb); if (journal->j_trans_id == trans_id) { atomic_set(&(journal->j_jlock), 1); } - unlock_journal(p_s_sb); + unlock_journal(sb); } } BUG_ON(journal->j_trans_id == trans_id); if (commit_now - && journal_list_still_alive(p_s_sb, trans_id) + && journal_list_still_alive(sb, trans_id) && wait_on_commit) { - flush_commit_list(p_s_sb, jl, 1); + flush_commit_list(sb, jl, 1); } return 0; } - unlock_journal(p_s_sb); + unlock_journal(sb); return 0; } @@ -3659,12 +3658,12 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, && journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) { journal->j_bcount++; - unlock_journal(p_s_sb); + unlock_journal(sb); return 0; } - if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) { - reiserfs_panic(p_s_sb, "journal-003", + if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) { + reiserfs_panic(sb, "journal-003", "j_start (%ld) is too high", journal->j_start); } @@ -3686,16 +3685,16 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list. 
*/ int journal_mark_freed(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, b_blocknr_t blocknr) + struct super_block *sb, b_blocknr_t blocknr) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn = NULL; struct buffer_head *bh = NULL; struct reiserfs_list_bitmap *jb = NULL; int cleaned = 0; BUG_ON(!th->t_trans_id); - cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr); + cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr); if (cn && cn->bh) { bh = cn->bh; get_bh(bh); @@ -3705,15 +3704,15 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, clear_buffer_journal_new(bh); clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); - cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned); + cleaned = remove_from_transaction(sb, blocknr, cleaned); } else { /* set the bit for this block in the journal bitmap for this transaction */ jb = journal->j_current_jl->j_list_bitmap; if (!jb) { - reiserfs_panic(p_s_sb, "journal-1702", + reiserfs_panic(sb, "journal-1702", "journal_list_bitmap is NULL"); } - set_bit_in_list_bitmap(p_s_sb, blocknr, jb); + set_bit_in_list_bitmap(sb, blocknr, jb); /* Note, the entire while loop is not allowed to schedule. */ @@ -3721,13 +3720,13 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); } - cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned); + cleaned = remove_from_transaction(sb, blocknr, cleaned); /* find all older transactions with this block, make sure they don't try to write it out */ - cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, + cn = get_journal_hash_dev(sb, journal->j_list_hash_table, blocknr); while (cn) { - if (p_s_sb == cn->sb && blocknr == cn->blocknr) { + if (sb == cn->sb && blocknr == cn->blocknr) { set_bit(BLOCK_FREED, &cn->state); if (cn->bh) { if (!cleaned) { @@ -3743,7 +3742,7 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, put_bh(cn->bh); if (atomic_read (&(cn->bh->b_count)) < 0) { - reiserfs_warning(p_s_sb, + reiserfs_warning(sb, "journal-2138", "cn->bh->b_count < 0"); } @@ -3850,18 +3849,18 @@ int reiserfs_commit_for_inode(struct inode *inode) return __commit_trans_jl(inode, id, jl); } -void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb, +void reiserfs_restore_prepared_buffer(struct super_block *sb, struct buffer_head *bh) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); - PROC_INFO_INC(p_s_sb, journal.restore_prepared); + struct reiserfs_journal *journal = SB_JOURNAL(sb); + PROC_INFO_INC(sb, journal.restore_prepared); if (!bh) { return; } if (test_clear_buffer_journal_restore_dirty(bh) && buffer_journal_dirty(bh)) { struct reiserfs_journal_cnode *cn; - cn = get_journal_hash_dev(p_s_sb, + cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bh->b_blocknr); if (cn && can_dirty(cn)) { @@ -3880,10 +3879,10 @@ extern struct tree_balance *cur_tb; ** wait on it. ** */ -int reiserfs_prepare_for_journal(struct super_block *p_s_sb, +int reiserfs_prepare_for_journal(struct super_block *sb, struct buffer_head *bh, int wait) { - PROC_INFO_INC(p_s_sb, journal.prepare); + PROC_INFO_INC(sb, journal.prepare); if (!trylock_buffer(bh)) { if (!wait) @@ -3931,10 +3930,10 @@ static void flush_old_journal_lists(struct super_block *s) ** journal lists, etc just won't happen. 
*/ static int do_journal_end(struct reiserfs_transaction_handle *th, - struct super_block *p_s_sb, unsigned long nblocks, + struct super_block *sb, unsigned long nblocks, int flags) { - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn, *next, *jl_cn; struct reiserfs_journal_cnode *last_cn = NULL; struct reiserfs_journal_desc *desc; @@ -3964,14 +3963,14 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, put_fs_excl(); current->journal_info = th->t_handle_save; - reiserfs_check_lock_depth(p_s_sb, "journal end"); + reiserfs_check_lock_depth(sb, "journal end"); if (journal->j_len == 0) { - reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), + reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); - journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)); + journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb)); } - lock_journal(p_s_sb); + lock_journal(sb); if (journal->j_next_full_flush) { flags |= FLUSH_ALL; flush = 1; @@ -3984,10 +3983,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, /* check_journal_end locks the journal, and unlocks if it does not return 1 ** it tells us if we should continue with the journal_end, or just return */ - if (!check_journal_end(th, p_s_sb, nblocks, flags)) { - p_s_sb->s_dirt = 1; - wake_queued_writers(p_s_sb); - reiserfs_async_progress_wait(p_s_sb); + if (!check_journal_end(th, sb, nblocks, flags)) { + sb->s_dirt = 1; + wake_queued_writers(sb); + reiserfs_async_progress_wait(sb); goto out; } @@ -4016,8 +4015,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, /* setup description block */ d_bh = - journal_getblk(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + journal_getblk(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start); set_buffer_uptodate(d_bh); desc = (struct reiserfs_journal_desc *)(d_bh)->b_data; @@ -4026,9 +4025,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, set_desc_trans_id(desc, journal->j_trans_id); /* setup commit block. 
Don't write (keep it clean too) this one until after everyone else is written */ - c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((journal->j_start + journal->j_len + - 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))); + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); commit = (struct reiserfs_journal_commit *)c_bh->b_data; memset(c_bh->b_data, 0, c_bh->b_size); set_commit_trans_id(commit, journal->j_trans_id); @@ -4061,12 +4060,12 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, ** for each real block, add it to the journal list hash, ** copy into real block index array in the commit or desc block */ - trans_half = journal_trans_half(p_s_sb->s_blocksize); + trans_half = journal_trans_half(sb->s_blocksize); for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) { if (buffer_journaled(cn->bh)) { - jl_cn = get_cnode(p_s_sb); + jl_cn = get_cnode(sb); if (!jl_cn) { - reiserfs_panic(p_s_sb, "journal-1676", + reiserfs_panic(sb, "journal-1676", "get_cnode returned NULL"); } if (i == 0) { @@ -4082,15 +4081,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, of journal or reserved area */ if (is_block_in_log_or_reserved_area - (p_s_sb, cn->bh->b_blocknr)) { - reiserfs_panic(p_s_sb, "journal-2332", + (sb, cn->bh->b_blocknr)) { + reiserfs_panic(sb, "journal-2332", "Trying to log block %lu, " "which is a log block", cn->bh->b_blocknr); } jl_cn->blocknr = cn->bh->b_blocknr; jl_cn->state = 0; - jl_cn->sb = p_s_sb; + jl_cn->sb = sb; jl_cn->bh = cn->bh; jl_cn->jlist = jl; insert_journal_hash(journal->j_list_hash_table, jl_cn); @@ -4131,11 +4130,11 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, char *addr; struct page *page; tmp_bh = - journal_getblk(p_s_sb, - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + + journal_getblk(sb, + SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((cur_write_start + jindex) % - SB_ONDISK_JOURNAL_SIZE(p_s_sb))); + SB_ONDISK_JOURNAL_SIZE(sb))); set_buffer_uptodate(tmp_bh); page = cn->bh->b_page; addr = kmap(page); @@ -4149,13 +4148,13 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, clear_buffer_journaled(cn->bh); } else { /* JDirty cleared sometime during transaction. don't log this one */ - reiserfs_warning(p_s_sb, "journal-2048", + reiserfs_warning(sb, "journal-2048", "BAD, buffer in journal hash, " "but not JDirty!"); brelse(cn->bh); } next = cn->next; - free_cnode(p_s_sb, cn); + free_cnode(sb, cn); cn = next; cond_resched(); } @@ -4165,7 +4164,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1. 
*/ - journal->j_current_jl = alloc_journal_list(p_s_sb); + journal->j_current_jl = alloc_journal_list(sb); /* now it is safe to insert this transaction on the main list */ list_add_tail(&jl->j_list, &journal->j_journal_list); @@ -4176,7 +4175,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, old_start = journal->j_start; journal->j_start = (journal->j_start + journal->j_len + - 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb); + 2) % SB_ONDISK_JOURNAL_SIZE(sb); atomic_set(&(journal->j_wcount), 0); journal->j_bcount = 0; journal->j_last = NULL; @@ -4191,7 +4190,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, journal->j_len_alloc = 0; journal->j_next_full_flush = 0; journal->j_next_async_flush = 0; - init_journal_hash(p_s_sb); + init_journal_hash(sb); // make sure reiserfs_add_jh sees the new current_jl before we // write out the tails @@ -4220,8 +4219,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, ** queue don't wait for this proc to flush journal lists and such. */ if (flush) { - flush_commit_list(p_s_sb, jl, 1); - flush_journal_list(p_s_sb, jl, 1); + flush_commit_list(sb, jl, 1); + flush_journal_list(sb, jl, 1); } else if (!(jl->j_state & LIST_COMMIT_PENDING)) queue_delayed_work(commit_wq, &journal->j_work, HZ / 10); @@ -4235,11 +4234,11 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, if (journal->j_start <= temp_jl->j_start) { if ((journal->j_start + journal->j_trans_max + 1) >= temp_jl->j_start) { - flush_used_journal_lists(p_s_sb, temp_jl); + flush_used_journal_lists(sb, temp_jl); goto first_jl; } else if ((journal->j_start + journal->j_trans_max + 1) < - SB_ONDISK_JOURNAL_SIZE(p_s_sb)) { + SB_ONDISK_JOURNAL_SIZE(sb)) { /* if we don't cross into the next transaction and we don't * wrap, there is no way we can overlap any later transactions * break now @@ -4248,11 +4247,11 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, } } else if ((journal->j_start + journal->j_trans_max + 1) > - SB_ONDISK_JOURNAL_SIZE(p_s_sb)) { + SB_ONDISK_JOURNAL_SIZE(sb)) { if (((journal->j_start + journal->j_trans_max + 1) % - SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= + SB_ONDISK_JOURNAL_SIZE(sb)) >= temp_jl->j_start) { - flush_used_journal_lists(p_s_sb, temp_jl); + flush_used_journal_lists(sb, temp_jl); goto first_jl; } else { /* we don't overlap anything from out start to the end of the @@ -4263,34 +4262,34 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, } } } - flush_old_journal_lists(p_s_sb); + flush_old_journal_lists(sb); journal->j_current_jl->j_list_bitmap = - get_list_bitmap(p_s_sb, journal->j_current_jl); + get_list_bitmap(sb, journal->j_current_jl); if (!(journal->j_current_jl->j_list_bitmap)) { - reiserfs_panic(p_s_sb, "journal-1996", + reiserfs_panic(sb, "journal-1996", "could not get a list bitmap"); } atomic_set(&(journal->j_jlock), 0); - unlock_journal(p_s_sb); + unlock_journal(sb); /* wake up any body waiting to join. */ clear_bit(J_WRITERS_QUEUED, &journal->j_state); wake_up(&(journal->j_join_wait)); if (!flush && wait_on_commit && - journal_list_still_alive(p_s_sb, commit_trans_id)) { - flush_commit_list(p_s_sb, jl, 1); + journal_list_still_alive(sb, commit_trans_id)) { + flush_commit_list(sb, jl, 1); } out: - reiserfs_check_lock_depth(p_s_sb, "journal end2"); + reiserfs_check_lock_depth(sb, "journal end2"); memset(th, 0, sizeof(*th)); /* Re-set th->t_super, so we can properly keep track of how many * persistent transactions there are. 
We need to do this so if this * call is part of a failed restart_transaction, we can free it later */ - th->t_super = p_s_sb; + th->t_super = sb; return journal->j_errno; } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index a65bfee28bb8..00fd879c4a2a 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -245,7 +245,7 @@ static const struct reiserfs_key MAX_KEY = { static inline const struct reiserfs_key *get_lkey(const struct treepath *p_s_chk_path, const struct super_block - *p_s_sb) + *sb) { int n_position, n_path_offset = p_s_chk_path->path_length; struct buffer_head *p_s_parent; @@ -282,14 +282,14 @@ static inline const struct reiserfs_key *get_lkey(const struct treepath } /* Return MIN_KEY if we are in the root of the buffer tree. */ if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)-> - b_blocknr == SB_ROOT_BLOCK(p_s_sb)) + b_blocknr == SB_ROOT_BLOCK(sb)) return &MIN_KEY; return &MAX_KEY; } /* Get delimiting key of the buffer at the path and its right neighbor. */ inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, - const struct super_block *p_s_sb) + const struct super_block *sb) { int n_position, n_path_offset = p_s_chk_path->path_length; struct buffer_head *p_s_parent; @@ -325,7 +325,7 @@ inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, } /* Return MAX_KEY if we are in the root of the buffer tree. */ if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)-> - b_blocknr == SB_ROOT_BLOCK(p_s_sb)) + b_blocknr == SB_ROOT_BLOCK(sb)) return &MAX_KEY; return &MIN_KEY; } @@ -337,7 +337,7 @@ inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */ static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which should be checked. */ const struct cpu_key *p_s_key, /* Key which should be checked. */ - struct super_block *p_s_sb /* Super block pointer. */ + struct super_block *sb /* Super block pointer. */ ) { @@ -348,11 +348,11 @@ static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which sho RFALSE(!PATH_PLAST_BUFFER(p_s_chk_path)->b_bdev, "PAP-5060: device must not be NODEV"); - if (comp_keys(get_lkey(p_s_chk_path, p_s_sb), p_s_key) == 1) + if (comp_keys(get_lkey(p_s_chk_path, sb), p_s_key) == 1) /* left delimiting key is bigger, that the key we look for */ return 0; - // if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, p_s_sb)) != -1 ) - if (comp_keys(get_rkey(p_s_chk_path, p_s_sb), p_s_key) != 1) + // if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, sb)) != -1 ) + if (comp_keys(get_rkey(p_s_chk_path, sb), p_s_key) != 1) /* p_s_key must be less than right delimitiing key */ return 0; return 1; @@ -546,7 +546,7 @@ static void search_by_key_reada(struct super_block *s, /************************************************************************** * Algorithm SearchByKey * * look for item in the Disk S+Tree by its key * - * Input: p_s_sb - super block * + * Input: sb - super block * * p_s_key - pointer to the key to search * * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR * * p_s_search_path - path from the root to the needed leaf * @@ -566,7 +566,7 @@ static void search_by_key_reada(struct super_block *s, correctness of the top of the path but need not be checked for the correctness of the bottom of the path */ /* The function is NOT SCHEDULE-SAFE! */ -int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* Key to search. 
*/ +int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key to search. */ struct treepath *p_s_search_path,/* This structure was allocated and initialized by the calling @@ -592,7 +592,7 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* int n_repeat_counter = 0; #endif - PROC_INFO_INC(p_s_sb, search_by_key); + PROC_INFO_INC(sb, search_by_key); /* As we add each node to a path we increase its count. This means that we must be careful to release all nodes in a path before we either @@ -605,13 +605,13 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* /* With each iteration of this loop we search through the items in the current node, and calculate the next current node(next path element) for the next iteration of this loop.. */ - n_block_number = SB_ROOT_BLOCK(p_s_sb); + n_block_number = SB_ROOT_BLOCK(sb); expected_level = -1; while (1) { #ifdef CONFIG_REISERFS_CHECK if (!(++n_repeat_counter % 50000)) - reiserfs_warning(p_s_sb, "PAP-5100", + reiserfs_warning(sb, "PAP-5100", "%s: there were %d iterations of " "while loop looking for key %K", current->comm, n_repeat_counter, @@ -622,14 +622,14 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* p_s_last_element = PATH_OFFSET_PELEMENT(p_s_search_path, ++p_s_search_path->path_length); - fs_gen = get_generation(p_s_sb); + fs_gen = get_generation(sb); /* Read the next tree node, and set the last element in the path to have a pointer to it. */ if ((p_s_bh = p_s_last_element->pe_buffer = - sb_getblk(p_s_sb, n_block_number))) { + sb_getblk(sb, n_block_number))) { if (!buffer_uptodate(p_s_bh) && reada_count > 1) { - search_by_key_reada(p_s_sb, reada_bh, + search_by_key_reada(sb, reada_bh, reada_blocks, reada_count); } ll_rw_block(READ, 1, &p_s_bh); @@ -644,25 +644,25 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* } reada_count = 0; if (expected_level == -1) - expected_level = SB_TREE_HEIGHT(p_s_sb); + expected_level = SB_TREE_HEIGHT(sb); expected_level--; /* It is possible that schedule occurred. We must check whether the key to search is still in the tree rooted from the current buffer. If not then repeat search from the root. */ - if (fs_changed(fs_gen, p_s_sb) && + if (fs_changed(fs_gen, sb) && (!B_IS_IN_TREE(p_s_bh) || B_LEVEL(p_s_bh) != expected_level || - !key_in_buffer(p_s_search_path, p_s_key, p_s_sb))) { - PROC_INFO_INC(p_s_sb, search_by_key_fs_changed); - PROC_INFO_INC(p_s_sb, search_by_key_restarted); - PROC_INFO_INC(p_s_sb, + !key_in_buffer(p_s_search_path, p_s_key, sb))) { + PROC_INFO_INC(sb, search_by_key_fs_changed); + PROC_INFO_INC(sb, search_by_key_restarted); + PROC_INFO_INC(sb, sbk_restarted[expected_level - 1]); pathrelse(p_s_search_path); /* Get the root block number so that we can repeat the search starting from the root. */ - n_block_number = SB_ROOT_BLOCK(p_s_sb); + n_block_number = SB_ROOT_BLOCK(sb); expected_level = -1; right_neighbor_of_leaf_node = 0; @@ -674,12 +674,12 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* equal to the MAX_KEY. Latter case is only possible in "finish_unfinished()" processing during mount. 
*/ RFALSE(comp_keys(&MAX_KEY, p_s_key) && - !key_in_buffer(p_s_search_path, p_s_key, p_s_sb), + !key_in_buffer(p_s_search_path, p_s_key, sb), "PAP-5130: key is not in the buffer"); #ifdef CONFIG_REISERFS_CHECK if (cur_tb) { print_cur_tb("5140"); - reiserfs_panic(p_s_sb, "PAP-5140", + reiserfs_panic(sb, "PAP-5140", "schedule occurred in do_balance!"); } #endif @@ -687,7 +687,7 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* // make sure, that the node contents look like a node of // certain level if (!is_tree_node(p_s_bh, expected_level)) { - reiserfs_error(p_s_sb, "vs-5150", + reiserfs_error(sb, "vs-5150", "invalid format found in block %ld. " "Fsck?", p_s_bh->b_blocknr); pathrelse(p_s_search_path); @@ -697,7 +697,7 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* /* ok, we have acquired next formatted node in the tree */ n_node_level = B_LEVEL(p_s_bh); - PROC_INFO_BH_STAT(p_s_sb, p_s_bh, n_node_level - 1); + PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level - 1); RFALSE(n_node_level < n_stop_level, "vs-5152: tree level (%d) is less than stop level (%d)", @@ -776,7 +776,7 @@ int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* units of directory entries. */ /* The function is NOT SCHEDULE-SAFE! */ -int search_for_position_by_key(struct super_block *p_s_sb, /* Pointer to the super block. */ +int search_for_position_by_key(struct super_block *sb, /* Pointer to the super block. */ const struct cpu_key *p_cpu_key, /* Key to search (cpu variable) */ struct treepath *p_s_search_path /* Filled up by this function. */ ) @@ -789,13 +789,13 @@ int search_for_position_by_key(struct super_block *p_s_sb, /* Pointer to the sup /* If searching for directory entry. */ if (is_direntry_cpu_key(p_cpu_key)) - return search_by_entry_key(p_s_sb, p_cpu_key, p_s_search_path, + return search_by_entry_key(sb, p_cpu_key, p_s_search_path, &de); /* If not searching for directory entry. */ /* If item is found. */ - retval = search_item(p_s_sb, p_cpu_key, p_s_search_path); + retval = search_item(sb, p_cpu_key, p_s_search_path); if (retval == IO_ERROR) return retval; if (retval == ITEM_FOUND) { @@ -817,7 +817,7 @@ int search_for_position_by_key(struct super_block *p_s_sb, /* Pointer to the sup p_le_ih = B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path), --PATH_LAST_POSITION(p_s_search_path)); - n_blk_size = p_s_sb->s_blocksize; + n_blk_size = sb->s_blocksize; if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) { return FILE_NOT_FOUND; @@ -957,7 +957,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st int *p_n_cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */ ) { - struct super_block *p_s_sb = inode->i_sb; + struct super_block *sb = inode->i_sb; struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_path); struct buffer_head *p_s_bh = PATH_PLAST_BUFFER(p_s_path); @@ -986,7 +986,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st /* Case of an indirect item. 
*/ { - int blk_size = p_s_sb->s_blocksize; + int blk_size = sb->s_blocksize; struct item_head s_ih; int need_re_search; int delete = 0; @@ -1023,9 +1023,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st block = get_block_num(unfm, 0); if (block != 0) { - reiserfs_prepare_for_journal(p_s_sb, p_s_bh, 1); + reiserfs_prepare_for_journal(sb, p_s_bh, 1); put_block_num(unfm, 0, 0); - journal_mark_dirty (th, p_s_sb, p_s_bh); + journal_mark_dirty (th, sb, p_s_bh); reiserfs_free_block(th, inode, block, 1); } @@ -1049,9 +1049,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st /* a trick. If the buffer has been logged, this will do nothing. If ** we've broken the loop without logging it, it will restore the ** buffer */ - reiserfs_restore_prepared_buffer(p_s_sb, p_s_bh); + reiserfs_restore_prepared_buffer(sb, p_s_bh); } while (need_re_search && - search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_FOUND); + search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_FOUND); pos_in_item(p_s_path) = pos * UNFM_P_SIZE; if (*p_n_cut_size == 0) { @@ -1090,7 +1090,7 @@ static int calc_deleted_bytes_number(struct tree_balance *p_s_tb, char c_mode) static void init_tb_struct(struct reiserfs_transaction_handle *th, struct tree_balance *p_s_tb, - struct super_block *p_s_sb, + struct super_block *sb, struct treepath *p_s_path, int n_size) { @@ -1098,7 +1098,7 @@ static void init_tb_struct(struct reiserfs_transaction_handle *th, memset(p_s_tb, '\0', sizeof(struct tree_balance)); p_s_tb->transaction_handle = th; - p_s_tb->tb_sb = p_s_sb; + p_s_tb->tb_sb = sb; p_s_tb->tb_path = p_s_path; PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL; PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0; @@ -1147,7 +1147,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath struct inode *p_s_inode, /* inode is here just to update i_blocks and quotas */ struct buffer_head *p_s_un_bh) { /* NULL or unformatted node pointer. 
*/ - struct super_block *p_s_sb = p_s_inode->i_sb; + struct super_block *sb = p_s_inode->i_sb; struct tree_balance s_del_balance; struct item_head s_ih; struct item_head *q_ih; @@ -1161,7 +1161,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath BUG_ON(!th->t_trans_id); - init_tb_struct(th, &s_del_balance, p_s_sb, p_s_path, + init_tb_struct(th, &s_del_balance, sb, p_s_path, 0 /*size is unknown */ ); while (1) { @@ -1185,15 +1185,15 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath if (n_ret_value != REPEAT_SEARCH) break; - PROC_INFO_INC(p_s_sb, delete_item_restarted); + PROC_INFO_INC(sb, delete_item_restarted); // file system changed, repeat search n_ret_value = - search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path); + search_for_position_by_key(sb, p_s_item_key, p_s_path); if (n_ret_value == IO_ERROR) break; if (n_ret_value == FILE_NOT_FOUND) { - reiserfs_warning(p_s_sb, "vs-5340", + reiserfs_warning(sb, "vs-5340", "no items of the file %K found", p_s_item_key); break; @@ -1216,8 +1216,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath ** the unfm node once */ if (!S_ISLNK(p_s_inode->i_mode) && is_direct_le_ih(q_ih)) { - if ((le_ih_k_offset(q_ih) & (p_s_sb->s_blocksize - 1)) == 1) { - quota_cut_bytes = p_s_sb->s_blocksize + UNFM_P_SIZE; + if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) { + quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE; } else { quota_cut_bytes = 0; } @@ -1258,7 +1258,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath do_balance(&s_del_balance, NULL, NULL, M_DELETE); #ifdef REISERQUOTA_DEBUG - reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, + reiserfs_debug(sb, REISERFS_DEBUG_CODE, "reiserquota delete_item(): freeing %u, id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih)); #endif @@ -1430,8 +1430,8 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, const struct cpu_key *p_s_item_key, loff_t n_new_file_size, char *p_c_mode) { - struct super_block *p_s_sb = p_s_inode->i_sb; - int n_block_size = p_s_sb->s_blocksize; + struct super_block *sb = p_s_inode->i_sb; + int n_block_size = sb->s_blocksize; int cut_bytes; BUG_ON(!th->t_trans_id); BUG_ON(n_new_file_size != p_s_inode->i_size); @@ -1509,7 +1509,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, struct inode *p_s_inode, struct page *page, loff_t n_new_file_size) { - struct super_block *p_s_sb = p_s_inode->i_sb; + struct super_block *sb = p_s_inode->i_sb; /* Every function which is going to call do_balance must first create a tree_balance structure. Then it must fill up this structure by using the init_tb_struct and fix_nodes functions. @@ -1560,7 +1560,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, /* removing of last unformatted node will change value we have to return to truncate. Save it */ retval2 = n_ret_value; - /*retval2 = p_s_sb->s_blocksize - (n_new_file_size & (p_s_sb->s_blocksize - 1)); */ + /*retval2 = sb->s_blocksize - (n_new_file_size & (sb->s_blocksize - 1)); */ /* So, we have performed the first part of the conversion: inserting the new direct item. 
Now we are removing the @@ -1569,16 +1569,16 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, set_cpu_key_k_type(p_s_item_key, TYPE_INDIRECT); p_s_item_key->key_length = 4; n_new_file_size -= - (n_new_file_size & (p_s_sb->s_blocksize - 1)); + (n_new_file_size & (sb->s_blocksize - 1)); tail_pos = n_new_file_size; set_cpu_key_k_offset(p_s_item_key, n_new_file_size + 1); if (search_for_position_by_key - (p_s_sb, p_s_item_key, + (sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND) { print_block(PATH_PLAST_BUFFER(p_s_path), 3, PATH_LAST_POSITION(p_s_path) - 1, PATH_LAST_POSITION(p_s_path) + 1); - reiserfs_panic(p_s_sb, "PAP-5580", "item to " + reiserfs_panic(sb, "PAP-5580", "item to " "convert does not exist (%K)", p_s_item_key); } @@ -1595,14 +1595,14 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, if (n_ret_value != REPEAT_SEARCH) break; - PROC_INFO_INC(p_s_sb, cut_from_item_restarted); + PROC_INFO_INC(sb, cut_from_item_restarted); n_ret_value = - search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path); + search_for_position_by_key(sb, p_s_item_key, p_s_path); if (n_ret_value == POSITION_FOUND) continue; - reiserfs_warning(p_s_sb, "PAP-5610", "item %K not found", + reiserfs_warning(sb, "PAP-5610", "item %K not found", p_s_item_key); unfix_nodes(&s_cut_balance); return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT; @@ -1616,7 +1616,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, indirect_to_direct_roll_back(th, p_s_inode, p_s_path); } if (n_ret_value == NO_DISK_SPACE) - reiserfs_warning(p_s_sb, "reiserfs-5092", + reiserfs_warning(sb, "reiserfs-5092", "NO_DISK_SPACE"); unfix_nodes(&s_cut_balance); return -EIO; @@ -1642,11 +1642,11 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path); if (!S_ISLNK(p_s_inode->i_mode) && is_direct_le_ih(p_le_ih)) { if (c_mode == M_DELETE && - (le_ih_k_offset(p_le_ih) & (p_s_sb->s_blocksize - 1)) == + (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) == 1) { // FIXME: this is to keep 3.5 happy REISERFS_I(p_s_inode)->i_first_direct_byte = U32_MAX; - quota_cut_bytes = p_s_sb->s_blocksize + UNFM_P_SIZE; + quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE; } else { quota_cut_bytes = 0; } @@ -1659,18 +1659,18 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, sure, that we exactly remove last unformatted node pointer of the item */ if (!is_indirect_le_ih(le_ih)) - reiserfs_panic(p_s_sb, "vs-5652", + reiserfs_panic(sb, "vs-5652", "item must be indirect %h", le_ih); if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE) - reiserfs_panic(p_s_sb, "vs-5653", "completing " + reiserfs_panic(sb, "vs-5653", "completing " "indirect2direct conversion indirect " "item %h being deleted must be of " "4 byte long", le_ih); if (c_mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) { - reiserfs_panic(p_s_sb, "vs-5654", "can not complete " + reiserfs_panic(sb, "vs-5654", "can not complete " "indirect2direct conversion of %h " "(CUT, insert_size==%d)", le_ih, s_cut_balance.insert_size[0]); diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index 0635cfe0f0b7..27311a5f0469 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -175,9 +175,9 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in loff_t n_new_file_size, /* New file size. 
*/ char *p_c_mode) { - struct super_block *p_s_sb = p_s_inode->i_sb; + struct super_block *sb = p_s_inode->i_sb; struct item_head s_ih; - unsigned long n_block_size = p_s_sb->s_blocksize; + unsigned long n_block_size = sb->s_blocksize; char *tail; int tail_len, round_tail_len; loff_t pos, pos1; /* position of first byte of the tail */ @@ -185,7 +185,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in BUG_ON(!th->t_trans_id); - REISERFS_SB(p_s_sb)->s_indirect2direct++; + REISERFS_SB(sb)->s_indirect2direct++; *p_c_mode = M_SKIP_BALANCING; @@ -200,7 +200,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in pos = le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE - - 1) * p_s_sb->s_blocksize; + 1) * sb->s_blocksize; pos1 = pos; // we are protected by i_mutex. The tail can not disapper, not @@ -211,18 +211,18 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in if (path_changed(&s_ih, p_s_path)) { /* re-search indirect item */ - if (search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) + if (search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND) - reiserfs_panic(p_s_sb, "PAP-5520", + reiserfs_panic(sb, "PAP-5520", "item to be converted %K does not exist", p_s_item_key); copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); #ifdef CONFIG_REISERFS_CHECK pos = le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE - - 1) * p_s_sb->s_blocksize; + 1) * sb->s_blocksize; if (pos != pos1) - reiserfs_panic(p_s_sb, "vs-5530", "tail position " + reiserfs_panic(sb, "vs-5530", "tail position " "changed while we were reading it"); #endif } diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index eb4e912e6bd3..9bd7800d989c 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -1769,12 +1769,12 @@ int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *, int journal_mark_freed(struct reiserfs_transaction_handle *, struct super_block *, b_blocknr_t blocknr); int journal_transaction_should_end(struct reiserfs_transaction_handle *, int); -int reiserfs_in_journal(struct super_block *p_s_sb, unsigned int bmap_nr, - int bit_nr, int searchall, b_blocknr_t *next); +int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, + int bit_nr, int searchall, b_blocknr_t *next); int journal_begin(struct reiserfs_transaction_handle *, - struct super_block *p_s_sb, unsigned long); + struct super_block *sb, unsigned long); int journal_join_abort(struct reiserfs_transaction_handle *, - struct super_block *p_s_sb, unsigned long); + struct super_block *sb, unsigned long); void reiserfs_abort_journal(struct super_block *sb, int errno); void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...); int reiserfs_allocate_list_bitmaps(struct super_block *s, @@ -1830,11 +1830,11 @@ static inline void copy_key(struct reiserfs_key *to, int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path); const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, - const struct super_block *p_s_sb); + const struct super_block *sb); int search_by_key(struct super_block *, const struct cpu_key *, struct treepath *, int); #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL) -int search_for_position_by_key(struct super_block *p_s_sb, +int search_for_position_by_key(struct super_block *sb, const struct cpu_key *p_s_cpu_key, struct treepath *p_s_search_path); 
extern void decrement_bcount(struct buffer_head *p_s_bh); @@ -1978,7 +1978,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, #define PROC_INFO_MAX( sb, field, value ) VOID_V #define PROC_INFO_INC( sb, field ) VOID_V #define PROC_INFO_ADD( sb, field, val ) VOID_V -#define PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level ) VOID_V +#define PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level) VOID_V #endif /* dir.c */ -- cgit v1.2.3-59-g8ed1b From ad31a4fc0386e8590c51ca4b8f1ae1d8b8b2ac5e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:46 -0400 Subject: reiserfs: rename p_s_bh to bh This patch is a simple s/p_s_bh/bh/g to the reiserfs code. This is the second in a series of patches to rip out some of the awful variable naming in reiserfs. Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/fix_node.c | 94 +++++++++++++++++++++------------------------ fs/reiserfs/stree.c | 63 +++++++++++++++--------------- include/linux/reiserfs_fs.h | 35 +++++++++-------- 3 files changed, 93 insertions(+), 99 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 799c0ce24291..ad42c45af44f 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -1887,7 +1887,7 @@ static int check_balance(int mode, /* Check whether parent at the path is the really parent of the current node.*/ static int get_direct_parent(struct tree_balance *p_s_tb, int n_h) { - struct buffer_head *p_s_bh; + struct buffer_head *bh; struct treepath *p_s_path = p_s_tb->tb_path; int n_position, n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h); @@ -1909,21 +1909,21 @@ static int get_direct_parent(struct tree_balance *p_s_tb, int n_h) } if (!B_IS_IN_TREE - (p_s_bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))) + (bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))) return REPEAT_SEARCH; /* Parent in the path is not in the tree. */ if ((n_position = PATH_OFFSET_POSITION(p_s_path, - n_path_offset - 1)) > B_NR_ITEMS(p_s_bh)) + n_path_offset - 1)) > B_NR_ITEMS(bh)) return REPEAT_SEARCH; - if (B_N_CHILD_NUM(p_s_bh, n_position) != + if (B_N_CHILD_NUM(bh, n_position) != PATH_OFFSET_PBUFFER(p_s_path, n_path_offset)->b_blocknr) /* Parent in the path is not parent of the current node in the tree. */ return REPEAT_SEARCH; - if (buffer_locked(p_s_bh)) { - __wait_on_buffer(p_s_bh); + if (buffer_locked(bh)) { + __wait_on_buffer(bh); if (FILESYSTEM_CHANGED_TB(p_s_tb)) return REPEAT_SEARCH; } @@ -1943,29 +1943,29 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1); unsigned long n_son_number; struct super_block *sb = p_s_tb->tb_sb; - struct buffer_head *p_s_bh; + struct buffer_head *bh; PROC_INFO_INC(sb, get_neighbors[n_h]); if (p_s_tb->lnum[n_h]) { /* We need left neighbor to balance S[n_h]. */ PROC_INFO_INC(sb, need_l_neighbor[n_h]); - p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); + bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); - RFALSE(p_s_bh == p_s_tb->FL[n_h] && + RFALSE(bh == p_s_tb->FL[n_h] && !PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset), "PAP-8270: invalid position in the parent"); n_child_position = - (p_s_bh == + (bh == p_s_tb->FL[n_h]) ? 
p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb-> FL[n_h]); n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position); - p_s_bh = sb_bread(sb, n_son_number); - if (!p_s_bh) + bh = sb_bread(sb, n_son_number); + if (!bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { - brelse(p_s_bh); + brelse(bh); PROC_INFO_INC(sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } @@ -1973,48 +1973,48 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) RFALSE(!B_IS_IN_TREE(p_s_tb->FL[n_h]) || n_child_position > B_NR_ITEMS(p_s_tb->FL[n_h]) || B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position) != - p_s_bh->b_blocknr, "PAP-8275: invalid parent"); - RFALSE(!B_IS_IN_TREE(p_s_bh), "PAP-8280: invalid child"); + bh->b_blocknr, "PAP-8275: invalid parent"); + RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child"); RFALSE(!n_h && - B_FREE_SPACE(p_s_bh) != - MAX_CHILD_SIZE(p_s_bh) - + B_FREE_SPACE(bh) != + MAX_CHILD_SIZE(bh) - dc_size(B_N_CHILD(p_s_tb->FL[0], n_child_position)), "PAP-8290: invalid child size of left neighbor"); brelse(p_s_tb->L[n_h]); - p_s_tb->L[n_h] = p_s_bh; + p_s_tb->L[n_h] = bh; } if (p_s_tb->rnum[n_h]) { /* We need right neighbor to balance S[n_path_offset]. */ PROC_INFO_INC(sb, need_r_neighbor[n_h]); - p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); + bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); - RFALSE(p_s_bh == p_s_tb->FR[n_h] && + RFALSE(bh == p_s_tb->FR[n_h] && PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset) >= - B_NR_ITEMS(p_s_bh), + B_NR_ITEMS(bh), "PAP-8295: invalid position in the parent"); n_child_position = - (p_s_bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0; + (bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0; n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position); - p_s_bh = sb_bread(sb, n_son_number); - if (!p_s_bh) + bh = sb_bread(sb, n_son_number); + if (!bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(p_s_tb)) { - brelse(p_s_bh); + brelse(bh); PROC_INFO_INC(sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } brelse(p_s_tb->R[n_h]); - p_s_tb->R[n_h] = p_s_bh; + p_s_tb->R[n_h] = bh; RFALSE(!n_h - && B_FREE_SPACE(p_s_bh) != - MAX_CHILD_SIZE(p_s_bh) - + && B_FREE_SPACE(bh) != + MAX_CHILD_SIZE(bh) - dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position)), "PAP-8300: invalid child size of right neighbor (%d != %d - %d)", - B_FREE_SPACE(p_s_bh), MAX_CHILD_SIZE(p_s_bh), + B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh), dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position))); } @@ -2090,51 +2090,45 @@ static int get_mem_for_virtual_node(struct tree_balance *tb) #ifdef CONFIG_REISERFS_CHECK static void tb_buffer_sanity_check(struct super_block *sb, - struct buffer_head *p_s_bh, + struct buffer_head *bh, const char *descr, int level) { - if (p_s_bh) { - if (atomic_read(&(p_s_bh->b_count)) <= 0) { + if (bh) { + if (atomic_read(&(bh->b_count)) <= 0) reiserfs_panic(sb, "jmacd-1", "negative or zero " "reference counter for buffer %s[%d] " - "(%b)", descr, level, p_s_bh); - } + "(%b)", descr, level, bh); - if (!buffer_uptodate(p_s_bh)) { + if (!buffer_uptodate(bh)) reiserfs_panic(sb, "jmacd-2", "buffer is not up " "to date %s[%d] (%b)", - descr, level, p_s_bh); - } + descr, level, bh); - if (!B_IS_IN_TREE(p_s_bh)) { + if (!B_IS_IN_TREE(bh)) reiserfs_panic(sb, "jmacd-3", "buffer is not " "in tree %s[%d] (%b)", - descr, level, p_s_bh); - } + descr, level, bh); - if (p_s_bh->b_bdev != sb->s_bdev) { + if (bh->b_bdev != sb->s_bdev) reiserfs_panic(sb, "jmacd-4", "buffer has wrong " "device %s[%d] (%b)", - descr, level, p_s_bh); - } + descr, level, 
bh); - if (p_s_bh->b_size != sb->s_blocksize) { + if (bh->b_size != sb->s_blocksize) reiserfs_panic(sb, "jmacd-5", "buffer has wrong " "blocksize %s[%d] (%b)", - descr, level, p_s_bh); - } + descr, level, bh); - if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(sb)) { + if (bh->b_blocknr > SB_BLOCK_COUNT(sb)) reiserfs_panic(sb, "jmacd-6", "buffer block " "number too high %s[%d] (%b)", - descr, level, p_s_bh); - } + descr, level, bh); } } #else static void tb_buffer_sanity_check(struct super_block *sb, - struct buffer_head *p_s_bh, + struct buffer_head *bh, const char *descr, int level) {; } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 00fd879c4a2a..eb6856f6d323 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -56,13 +56,13 @@ #include /* Does the buffer contain a disk block which is in the tree. */ -inline int B_IS_IN_TREE(const struct buffer_head *p_s_bh) +inline int B_IS_IN_TREE(const struct buffer_head *bh) { - RFALSE(B_LEVEL(p_s_bh) > MAX_HEIGHT, - "PAP-1010: block (%b) has too big level (%z)", p_s_bh, p_s_bh); + RFALSE(B_LEVEL(bh) > MAX_HEIGHT, + "PAP-1010: block (%b) has too big level (%z)", bh, bh); - return (B_LEVEL(p_s_bh) != FREE_LEVEL); + return (B_LEVEL(bh) != FREE_LEVEL); } // @@ -579,7 +579,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key { b_blocknr_t n_block_number; int expected_level; - struct buffer_head *p_s_bh; + struct buffer_head *bh; struct path_element *p_s_last_element; int n_node_level, n_retval; int right_neighbor_of_leaf_node; @@ -626,15 +626,14 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key /* Read the next tree node, and set the last element in the path to have a pointer to it. */ - if ((p_s_bh = p_s_last_element->pe_buffer = + if ((bh = p_s_last_element->pe_buffer = sb_getblk(sb, n_block_number))) { - if (!buffer_uptodate(p_s_bh) && reada_count > 1) { + if (!buffer_uptodate(bh) && reada_count > 1) search_by_key_reada(sb, reada_bh, reada_blocks, reada_count); - } - ll_rw_block(READ, 1, &p_s_bh); - wait_on_buffer(p_s_bh); - if (!buffer_uptodate(p_s_bh)) + ll_rw_block(READ, 1, &bh); + wait_on_buffer(bh); + if (!buffer_uptodate(bh)) goto io_error; } else { io_error: @@ -651,8 +650,8 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key to search is still in the tree rooted from the current buffer. If not then repeat search from the root. */ if (fs_changed(fs_gen, sb) && - (!B_IS_IN_TREE(p_s_bh) || - B_LEVEL(p_s_bh) != expected_level || + (!B_IS_IN_TREE(bh) || + B_LEVEL(bh) != expected_level || !key_in_buffer(p_s_search_path, p_s_key, sb))) { PROC_INFO_INC(sb, search_by_key_fs_changed); PROC_INFO_INC(sb, search_by_key_restarted); @@ -686,25 +685,25 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key // make sure, that the node contents look like a node of // certain level - if (!is_tree_node(p_s_bh, expected_level)) { + if (!is_tree_node(bh, expected_level)) { reiserfs_error(sb, "vs-5150", "invalid format found in block %ld. 
" - "Fsck?", p_s_bh->b_blocknr); + "Fsck?", bh->b_blocknr); pathrelse(p_s_search_path); return IO_ERROR; } /* ok, we have acquired next formatted node in the tree */ - n_node_level = B_LEVEL(p_s_bh); + n_node_level = B_LEVEL(bh); - PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level - 1); + PROC_INFO_BH_STAT(sb, bh, n_node_level - 1); RFALSE(n_node_level < n_stop_level, "vs-5152: tree level (%d) is less than stop level (%d)", n_node_level, n_stop_level); - n_retval = bin_search(p_s_key, B_N_PITEM_HEAD(p_s_bh, 0), - B_NR_ITEMS(p_s_bh), + n_retval = bin_search(p_s_key, B_N_PITEM_HEAD(bh, 0), + B_NR_ITEMS(bh), (n_node_level == DISK_LEAF_NODE_LEVEL) ? IH_SIZE : KEY_SIZE, @@ -726,13 +725,13 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key an internal node. Now we calculate child block number by position in the node. */ n_block_number = - B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position); + B_N_CHILD_NUM(bh, p_s_last_element->pe_position); /* if we are going to read leaf nodes, try for read ahead as well */ if ((p_s_search_path->reada & PATH_READA) && n_node_level == DISK_LEAF_NODE_LEVEL + 1) { int pos = p_s_last_element->pe_position; - int limit = B_NR_ITEMS(p_s_bh); + int limit = B_NR_ITEMS(bh); struct reiserfs_key *le_key; if (p_s_search_path->reada & PATH_READA_BACK) @@ -741,7 +740,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key if (pos == limit) break; reada_blocks[reada_count++] = - B_N_CHILD_NUM(p_s_bh, pos); + B_N_CHILD_NUM(bh, pos); if (p_s_search_path->reada & PATH_READA_BACK) pos--; else @@ -750,7 +749,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key /* * check to make sure we're in the same object */ - le_key = B_N_PDELIM_KEY(p_s_bh, pos); + le_key = B_N_PDELIM_KEY(bh, pos); if (le32_to_cpu(le_key->k_objectid) != p_s_key->on_disk_key.k_objectid) { break; @@ -851,15 +850,15 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b /* Compare given item and item pointed to by the path. */ int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path) { - struct buffer_head *p_s_bh; + struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path); struct item_head *ih; /* Last buffer at the path is not in the tree. */ - if (!B_IS_IN_TREE(p_s_bh = PATH_PLAST_BUFFER(p_s_path))) + if (!B_IS_IN_TREE(bh)) return 1; /* Last path position is invalid. 
*/ - if (PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(p_s_bh)) + if (PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(bh)) return 1; /* we need only to know, whether it is the same item */ @@ -959,7 +958,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st { struct super_block *sb = inode->i_sb; struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_path); - struct buffer_head *p_s_bh = PATH_PLAST_BUFFER(p_s_path); + struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path); BUG_ON(!th->t_trans_id); @@ -1003,7 +1002,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st do { need_re_search = 0; *p_n_cut_size = 0; - p_s_bh = PATH_PLAST_BUFFER(p_s_path); + bh = PATH_PLAST_BUFFER(p_s_path); copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); pos = I_UNFM_NUM(&s_ih); @@ -1019,13 +1018,13 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st break; } - unfm = (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1; + unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1; block = get_block_num(unfm, 0); if (block != 0) { - reiserfs_prepare_for_journal(sb, p_s_bh, 1); + reiserfs_prepare_for_journal(sb, bh, 1); put_block_num(unfm, 0, 0); - journal_mark_dirty (th, sb, p_s_bh); + journal_mark_dirty(th, sb, bh); reiserfs_free_block(th, inode, block, 1); } @@ -1049,7 +1048,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st /* a trick. If the buffer has been logged, this will do nothing. If ** we've broken the loop without logging it, it will restore the ** buffer */ - reiserfs_restore_prepared_buffer(sb, p_s_bh); + reiserfs_restore_prepared_buffer(sb, bh); } while (need_re_search && search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_FOUND); pos_in_item(p_s_path) = pos * UNFM_P_SIZE; diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 9bd7800d989c..9cfa518c90b6 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -751,25 +751,25 @@ struct block_head { #define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */ /* Given the buffer head of a formatted node, resolve to the block head of that node. */ -#define B_BLK_HEAD(p_s_bh) ((struct block_head *)((p_s_bh)->b_data)) +#define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data)) /* Number of items that are in buffer. */ -#define B_NR_ITEMS(p_s_bh) (blkh_nr_item(B_BLK_HEAD(p_s_bh))) -#define B_LEVEL(p_s_bh) (blkh_level(B_BLK_HEAD(p_s_bh))) -#define B_FREE_SPACE(p_s_bh) (blkh_free_space(B_BLK_HEAD(p_s_bh))) +#define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh))) +#define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh))) +#define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh))) -#define PUT_B_NR_ITEMS(p_s_bh,val) do { set_blkh_nr_item(B_BLK_HEAD(p_s_bh),val); } while (0) -#define PUT_B_LEVEL(p_s_bh,val) do { set_blkh_level(B_BLK_HEAD(p_s_bh),val); } while (0) -#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0) +#define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0) +#define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0) +#define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0) /* Get right delimiting key. -- little endian */ -#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh)))) +#define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh)))) /* Does the buffer contain a disk leaf. 
*/ -#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL) +#define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL) /* Does the buffer contain a disk internal node */ -#define B_IS_KEYS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) > DISK_LEAF_NODE_LEVEL \ - && B_LEVEL(p_s_bh) <= MAX_HEIGHT) +#define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \ + && B_LEVEL(bh) <= MAX_HEIGHT) /***************************************************************************/ /* STAT DATA */ @@ -1119,12 +1119,13 @@ struct disk_child { #define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0) /* Get disk child by buffer header and position in the tree node. */ -#define B_N_CHILD(p_s_bh,n_pos) ((struct disk_child *)\ -((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos))) +#define B_N_CHILD(bh, n_pos) ((struct disk_child *)\ +((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos))) /* Get disk child number by buffer header and position in the tree node. */ -#define B_N_CHILD_NUM(p_s_bh,n_pos) (dc_block_number(B_N_CHILD(p_s_bh,n_pos))) -#define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) (put_dc_block_number(B_N_CHILD(p_s_bh,n_pos), val )) +#define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos))) +#define PUT_B_N_CHILD_NUM(bh, n_pos, val) \ + (put_dc_block_number(B_N_CHILD(bh, n_pos), val)) /* maximal value of field child_size in structure disk_child */ /* child size is the combined size of all items and their headers */ @@ -1837,7 +1838,7 @@ int search_by_key(struct super_block *, const struct cpu_key *, int search_for_position_by_key(struct super_block *sb, const struct cpu_key *p_s_cpu_key, struct treepath *p_s_search_path); -extern void decrement_bcount(struct buffer_head *p_s_bh); +extern void decrement_bcount(struct buffer_head *bh); void decrement_counters_in_path(struct treepath *p_s_search_path); void pathrelse(struct treepath *p_s_search_path); int reiserfs_check_path(struct treepath *p); @@ -1978,7 +1979,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, #define PROC_INFO_MAX( sb, field, value ) VOID_V #define PROC_INFO_INC( sb, field ) VOID_V #define PROC_INFO_ADD( sb, field, val ) VOID_V -#define PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level) VOID_V +#define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V #endif /* dir.c */ -- cgit v1.2.3-59-g8ed1b From a063ae17925cafabe55ebe1957ca0e8c480bd132 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:48 -0400 Subject: reiserfs: rename p_s_tb to tb This patch is a simple s/p_s_tb/tb/g to the reiserfs code. This is the fourth in a series of patches to rip out some of the awful variable naming in reiserfs. 
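As a rough illustration of the mechanical rename the changelog describes (a word-boundary s/p_s_tb/tb/g), the sketch below shows how such a rename could be scripted. This is an assumed reconstruction, not the tooling actually used for this patch; the real change also involved re-indenting and re-wrapping the affected lines by hand, as the hunks that follow show, and the script name and file arguments are illustrative only.

    #!/usr/bin/env python3
    # Hypothetical sketch of a word-boundary rename like s/p_s_tb/tb/g.
    # Not the tooling that produced the patch below; manual reflow of long
    # lines would still be needed afterwards.
    import pathlib
    import re
    import sys

    # \b word boundaries avoid touching longer identifiers that merely
    # contain the substring.
    PATTERN = re.compile(r"\bp_s_tb\b")

    def rename_in_file(path: pathlib.Path) -> bool:
        """Rewrite one file in place; return True if anything changed."""
        text = path.read_text()
        new_text = PATTERN.sub("tb", text)
        if new_text == text:
            return False
        path.write_text(new_text)
        return True

    if __name__ == "__main__":
        # Usage (illustrative):
        #   python3 rename_tb.py fs/reiserfs/fix_node.c fs/reiserfs/stree.c
        for name in sys.argv[1:]:
            if rename_in_file(pathlib.Path(name)):
                print("rewrote", name)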
Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/fix_node.c | 482 ++++++++++++++++++++++---------------------- fs/reiserfs/stree.c | 21 +- include/linux/reiserfs_fs.h | 2 +- 3 files changed, 254 insertions(+), 251 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index ad42c45af44f..5236a8829e31 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -749,26 +749,26 @@ else \ -1, -1);\ } -static void free_buffers_in_tb(struct tree_balance *p_s_tb) +static void free_buffers_in_tb(struct tree_balance *tb) { int n_counter; - pathrelse(p_s_tb->tb_path); + pathrelse(tb->tb_path); for (n_counter = 0; n_counter < MAX_HEIGHT; n_counter++) { - brelse(p_s_tb->L[n_counter]); - brelse(p_s_tb->R[n_counter]); - brelse(p_s_tb->FL[n_counter]); - brelse(p_s_tb->FR[n_counter]); - brelse(p_s_tb->CFL[n_counter]); - brelse(p_s_tb->CFR[n_counter]); - - p_s_tb->L[n_counter] = NULL; - p_s_tb->R[n_counter] = NULL; - p_s_tb->FL[n_counter] = NULL; - p_s_tb->FR[n_counter] = NULL; - p_s_tb->CFL[n_counter] = NULL; - p_s_tb->CFR[n_counter] = NULL; + brelse(tb->L[n_counter]); + brelse(tb->R[n_counter]); + brelse(tb->FL[n_counter]); + brelse(tb->FR[n_counter]); + brelse(tb->CFL[n_counter]); + brelse(tb->CFR[n_counter]); + + tb->L[n_counter] = NULL; + tb->R[n_counter] = NULL; + tb->FL[n_counter] = NULL; + tb->FR[n_counter] = NULL; + tb->CFL[n_counter] = NULL; + tb->CFR[n_counter] = NULL; } } @@ -778,14 +778,14 @@ static void free_buffers_in_tb(struct tree_balance *p_s_tb) * NO_DISK_SPACE - no disk space. */ /* The function is NOT SCHEDULE-SAFE! */ -static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h) +static int get_empty_nodes(struct tree_balance *tb, int n_h) { struct buffer_head *p_s_new_bh, - *p_s_Sh = PATH_H_PBUFFER(p_s_tb->tb_path, n_h); + *p_s_Sh = PATH_H_PBUFFER(tb->tb_path, n_h); b_blocknr_t *p_n_blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */ n_retval = CARRY_ON; - struct super_block *sb = p_s_tb->tb_sb; + struct super_block *sb = tb->tb_sb; /* number_of_freeblk is the number of empty blocks which have been acquired for use by the balancing algorithm minus the number of @@ -803,15 +803,15 @@ static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h) the analysis or 0 if not restarted, then subtract the amount needed by all of the levels of the tree below n_h. */ /* blknum includes S[n_h], so we subtract 1 in this calculation */ - for (n_counter = 0, n_number_of_freeblk = p_s_tb->cur_blknum; + for (n_counter = 0, n_number_of_freeblk = tb->cur_blknum; n_counter < n_h; n_counter++) n_number_of_freeblk -= - (p_s_tb->blknum[n_counter]) ? (p_s_tb->blknum[n_counter] - + (tb->blknum[n_counter]) ? (tb->blknum[n_counter] - 1) : 0; /* Allocate missing empty blocks. */ /* if p_s_Sh == 0 then we are getting a new root */ - n_amount_needed = (p_s_Sh) ? (p_s_tb->blknum[n_h] - 1) : 1; + n_amount_needed = (p_s_Sh) ? (tb->blknum[n_h] - 1) : 1; /* Amount_needed = the amount that we need more than the amount that we have. 
*/ if (n_amount_needed > n_number_of_freeblk) n_amount_needed -= n_number_of_freeblk; @@ -819,7 +819,7 @@ static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h) return CARRY_ON; /* No need to check quota - is not allocated for blocks used for formatted nodes */ - if (reiserfs_new_form_blocknrs(p_s_tb, a_n_blocknrs, + if (reiserfs_new_form_blocknrs(tb, a_n_blocknrs, n_amount_needed) == NO_DISK_SPACE) return NO_DISK_SPACE; @@ -838,14 +838,14 @@ static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h) p_s_new_bh); /* Put empty buffers into the array. */ - RFALSE(p_s_tb->FEB[p_s_tb->cur_blknum], + RFALSE(tb->FEB[tb->cur_blknum], "PAP-8141: busy slot for new buffer"); set_buffer_journal_new(p_s_new_bh); - p_s_tb->FEB[p_s_tb->cur_blknum++] = p_s_new_bh; + tb->FEB[tb->cur_blknum++] = p_s_new_bh; } - if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(p_s_tb)) + if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb)) n_retval = REPEAT_SEARCH; return n_retval; @@ -896,33 +896,34 @@ static int get_rfree(struct tree_balance *tb, int h) } /* Check whether left neighbor is in memory. */ -static int is_left_neighbor_in_cache(struct tree_balance *p_s_tb, int n_h) +static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h) { struct buffer_head *p_s_father, *left; - struct super_block *sb = p_s_tb->tb_sb; + struct super_block *sb = tb->tb_sb; b_blocknr_t n_left_neighbor_blocknr; int n_left_neighbor_position; - if (!p_s_tb->FL[n_h]) /* Father of the left neighbor does not exist. */ + /* Father of the left neighbor does not exist. */ + if (!tb->FL[n_h]) return 0; /* Calculate father of the node to be balanced. */ - p_s_father = PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1); + p_s_father = PATH_H_PBUFFER(tb->tb_path, n_h + 1); RFALSE(!p_s_father || !B_IS_IN_TREE(p_s_father) || - !B_IS_IN_TREE(p_s_tb->FL[n_h]) || + !B_IS_IN_TREE(tb->FL[n_h]) || !buffer_uptodate(p_s_father) || - !buffer_uptodate(p_s_tb->FL[n_h]), + !buffer_uptodate(tb->FL[n_h]), "vs-8165: F[h] (%b) or FL[h] (%b) is invalid", - p_s_father, p_s_tb->FL[n_h]); + p_s_father, tb->FL[n_h]); /* Get position of the pointer to the left neighbor into the left father. */ - n_left_neighbor_position = (p_s_father == p_s_tb->FL[n_h]) ? - p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb->FL[n_h]); + n_left_neighbor_position = (p_s_father == tb->FL[n_h]) ? + tb->lkey[n_h] : B_NR_ITEMS(tb->FL[n_h]); /* Get left neighbor block number. */ n_left_neighbor_blocknr = - B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position); + B_N_CHILD_NUM(tb->FL[n_h], n_left_neighbor_position); /* Look for the left neighbor in the cache. */ if ((left = sb_find_get_block(sb, n_left_neighbor_blocknr))) { @@ -953,14 +954,14 @@ static void decrement_key(struct cpu_key *p_s_key) SCHEDULE_OCCURRED - schedule occurred while the function worked; * CARRY_ON - schedule didn't occur while the function worked; */ -static int get_far_parent(struct tree_balance *p_s_tb, +static int get_far_parent(struct tree_balance *tb, int n_h, struct buffer_head **pp_s_father, struct buffer_head **pp_s_com_father, char c_lr_par) { struct buffer_head *p_s_parent; INITIALIZE_PATH(s_path_to_neighbor_father); - struct treepath *p_s_path = p_s_tb->tb_path; + struct treepath *p_s_path = tb->tb_path; struct cpu_key s_lr_father_key; int n_counter, n_position = INT_MAX, @@ -1005,9 +1006,9 @@ static int get_far_parent(struct tree_balance *p_s_tb, if (n_counter == FIRST_PATH_ELEMENT_OFFSET) { /* Check whether first buffer in the path is the root of the tree. 
*/ if (PATH_OFFSET_PBUFFER - (p_s_tb->tb_path, + (tb->tb_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr == - SB_ROOT_BLOCK(p_s_tb->tb_sb)) { + SB_ROOT_BLOCK(tb->tb_sb)) { *pp_s_father = *pp_s_com_father = NULL; return CARRY_ON; } @@ -1022,7 +1023,7 @@ static int get_far_parent(struct tree_balance *p_s_tb, if (buffer_locked(*pp_s_com_father)) { __wait_on_buffer(*pp_s_com_father); - if (FILESYSTEM_CHANGED_TB(p_s_tb)) { + if (FILESYSTEM_CHANGED_TB(tb)) { brelse(*pp_s_com_father); return REPEAT_SEARCH; } @@ -1035,9 +1036,9 @@ static int get_far_parent(struct tree_balance *p_s_tb, le_key2cpu_key(&s_lr_father_key, B_N_PDELIM_KEY(*pp_s_com_father, (c_lr_par == - LEFT_PARENTS) ? (p_s_tb->lkey[n_h - 1] = + LEFT_PARENTS) ? (tb->lkey[n_h - 1] = n_position - - 1) : (p_s_tb->rkey[n_h - + 1) : (tb->rkey[n_h - 1] = n_position))); @@ -1045,12 +1046,12 @@ static int get_far_parent(struct tree_balance *p_s_tb, decrement_key(&s_lr_father_key); if (search_by_key - (p_s_tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, + (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, n_h + 1) == IO_ERROR) // path is released return IO_ERROR; - if (FILESYSTEM_CHANGED_TB(p_s_tb)) { + if (FILESYSTEM_CHANGED_TB(tb)) { pathrelse(&s_path_to_neighbor_father); brelse(*pp_s_com_father); return REPEAT_SEARCH; @@ -1075,24 +1076,26 @@ static int get_far_parent(struct tree_balance *p_s_tb, * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; * CARRY_ON - schedule didn't occur while the function worked; */ -static int get_parents(struct tree_balance *p_s_tb, int n_h) +static int get_parents(struct tree_balance *tb, int n_h) { - struct treepath *p_s_path = p_s_tb->tb_path; + struct treepath *p_s_path = tb->tb_path; int n_position, n_ret_value, - n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h); + n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h); struct buffer_head *p_s_curf, *p_s_curcf; /* Current node is the root of the tree or will be root of the tree */ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) { /* The root can not have parents. Release nodes which previously were obtained as parents of the current node neighbors. */ - brelse(p_s_tb->FL[n_h]); - brelse(p_s_tb->CFL[n_h]); - brelse(p_s_tb->FR[n_h]); - brelse(p_s_tb->CFR[n_h]); - p_s_tb->FL[n_h] = p_s_tb->CFL[n_h] = p_s_tb->FR[n_h] = - p_s_tb->CFR[n_h] = NULL; + brelse(tb->FL[n_h]); + brelse(tb->CFL[n_h]); + brelse(tb->FR[n_h]); + brelse(tb->CFR[n_h]); + tb->FL[n_h] = NULL; + tb->CFL[n_h] = NULL; + tb->FR[n_h] = NULL; + tb->CFR[n_h] = NULL; return CARRY_ON; } @@ -1104,22 +1107,22 @@ static int get_parents(struct tree_balance *p_s_tb, int n_h) PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1); get_bh(p_s_curf); get_bh(p_s_curf); - p_s_tb->lkey[n_h] = n_position - 1; + tb->lkey[n_h] = n_position - 1; } else { /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node. Calculate current common parent of L[n_path_offset] and the current node. Note that CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset]. Calculate lkey[n_path_offset]. */ - if ((n_ret_value = get_far_parent(p_s_tb, n_h + 1, &p_s_curf, + if ((n_ret_value = get_far_parent(tb, n_h + 1, &p_s_curf, &p_s_curcf, LEFT_PARENTS)) != CARRY_ON) return n_ret_value; } - brelse(p_s_tb->FL[n_h]); - p_s_tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */ - brelse(p_s_tb->CFL[n_h]); - p_s_tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. 
*/ + brelse(tb->FL[n_h]); + tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */ + brelse(tb->CFL[n_h]); + tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) || (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)), @@ -1133,7 +1136,7 @@ static int get_parents(struct tree_balance *p_s_tb, int n_h) Calculate current common parent of R[n_h] and current node. Note that CFR[n_h] not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */ if ((n_ret_value = - get_far_parent(p_s_tb, n_h + 1, &p_s_curf, &p_s_curcf, + get_far_parent(tb, n_h + 1, &p_s_curf, &p_s_curcf, RIGHT_PARENTS)) != CARRY_ON) return n_ret_value; } else { @@ -1143,14 +1146,16 @@ static int get_parents(struct tree_balance *p_s_tb, int n_h) PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1); get_bh(p_s_curf); get_bh(p_s_curf); - p_s_tb->rkey[n_h] = n_position; + tb->rkey[n_h] = n_position; } - brelse(p_s_tb->FR[n_h]); - p_s_tb->FR[n_h] = p_s_curf; /* New initialization of FR[n_path_offset]. */ + brelse(tb->FR[n_h]); + /* New initialization of FR[n_path_offset]. */ + tb->FR[n_h] = p_s_curf; - brelse(p_s_tb->CFR[n_h]); - p_s_tb->CFR[n_h] = p_s_curcf; /* New initialization of CFR[n_path_offset]. */ + brelse(tb->CFR[n_h]); + /* New initialization of CFR[n_path_offset]. */ + tb->CFR[n_h] = p_s_curcf; RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) || (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)), @@ -1885,12 +1890,12 @@ static int check_balance(int mode, } /* Check whether parent at the path is the really parent of the current node.*/ -static int get_direct_parent(struct tree_balance *p_s_tb, int n_h) +static int get_direct_parent(struct tree_balance *tb, int n_h) { struct buffer_head *bh; - struct treepath *p_s_path = p_s_tb->tb_path; + struct treepath *p_s_path = tb->tb_path; int n_position, - n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h); + n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h); /* We are in the root or in the new root. */ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) { @@ -1899,7 +1904,7 @@ static int get_direct_parent(struct tree_balance *p_s_tb, int n_h) "PAP-8260: invalid offset in the path"); if (PATH_OFFSET_PBUFFER(p_s_path, FIRST_PATH_ELEMENT_OFFSET)-> - b_blocknr == SB_ROOT_BLOCK(p_s_tb->tb_sb)) { + b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) { /* Root is not changed. */ PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1) = NULL; PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1) = 0; @@ -1924,7 +1929,7 @@ static int get_direct_parent(struct tree_balance *p_s_tb, int n_h) if (buffer_locked(bh)) { __wait_on_buffer(bh); - if (FILESYSTEM_CHANGED_TB(p_s_tb)) + if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; } @@ -1937,85 +1942,86 @@ static int get_direct_parent(struct tree_balance *p_s_tb, int n_h) * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; * CARRY_ON - schedule didn't occur while the function worked; */ -static int get_neighbors(struct tree_balance *p_s_tb, int n_h) +static int get_neighbors(struct tree_balance *tb, int n_h) { int n_child_position, - n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1); + n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h + 1); unsigned long n_son_number; - struct super_block *sb = p_s_tb->tb_sb; + struct super_block *sb = tb->tb_sb; struct buffer_head *bh; PROC_INFO_INC(sb, get_neighbors[n_h]); - if (p_s_tb->lnum[n_h]) { + if (tb->lnum[n_h]) { /* We need left neighbor to balance S[n_h]. 
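[Aside] The FILESYSTEM_CHANGED_TB() checks recurring in get_direct_parent() above and get_neighbors() below all follow one convention: any call that may sleep is followed by a generation check, and if the tree changed in the meantime the buffer just obtained is dropped and the whole analysis restarts. A minimal sketch of that convention using the same reiserfs return codes; read_child_or_restart() itself is hypothetical and not added by the patch:

        static int read_child_or_restart(struct tree_balance *tb, b_blocknr_t block,
                                         struct buffer_head **bh)
        {
                /* sb_bread() may sleep, so the tree can move under us */
                *bh = sb_bread(tb->tb_sb, block);
                if (!*bh)
                        return IO_ERROR;
                if (FILESYSTEM_CHANGED_TB(tb)) {
                        /* generation bumped: drop the buffer, redo the search */
                        brelse(*bh);
                        *bh = NULL;
                        return REPEAT_SEARCH;
                }
                return CARRY_ON;
        }
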
*/ PROC_INFO_INC(sb, need_l_neighbor[n_h]); - bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); + bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset); - RFALSE(bh == p_s_tb->FL[n_h] && - !PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset), + RFALSE(bh == tb->FL[n_h] && + !PATH_OFFSET_POSITION(tb->tb_path, n_path_offset), "PAP-8270: invalid position in the parent"); n_child_position = (bh == - p_s_tb->FL[n_h]) ? p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb-> + tb->FL[n_h]) ? tb->lkey[n_h] : B_NR_ITEMS(tb-> FL[n_h]); - n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position); + n_son_number = B_N_CHILD_NUM(tb->FL[n_h], n_child_position); bh = sb_bread(sb, n_son_number); if (!bh) return IO_ERROR; - if (FILESYSTEM_CHANGED_TB(p_s_tb)) { + if (FILESYSTEM_CHANGED_TB(tb)) { brelse(bh); PROC_INFO_INC(sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } - RFALSE(!B_IS_IN_TREE(p_s_tb->FL[n_h]) || - n_child_position > B_NR_ITEMS(p_s_tb->FL[n_h]) || - B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position) != + RFALSE(!B_IS_IN_TREE(tb->FL[n_h]) || + n_child_position > B_NR_ITEMS(tb->FL[n_h]) || + B_N_CHILD_NUM(tb->FL[n_h], n_child_position) != bh->b_blocknr, "PAP-8275: invalid parent"); RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child"); RFALSE(!n_h && B_FREE_SPACE(bh) != MAX_CHILD_SIZE(bh) - - dc_size(B_N_CHILD(p_s_tb->FL[0], n_child_position)), + dc_size(B_N_CHILD(tb->FL[0], n_child_position)), "PAP-8290: invalid child size of left neighbor"); - brelse(p_s_tb->L[n_h]); - p_s_tb->L[n_h] = bh; + brelse(tb->L[n_h]); + tb->L[n_h] = bh; } - if (p_s_tb->rnum[n_h]) { /* We need right neighbor to balance S[n_path_offset]. */ + /* We need right neighbor to balance S[n_path_offset]. */ + if (tb->rnum[n_h]) { PROC_INFO_INC(sb, need_r_neighbor[n_h]); - bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset); + bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset); - RFALSE(bh == p_s_tb->FR[n_h] && - PATH_OFFSET_POSITION(p_s_tb->tb_path, + RFALSE(bh == tb->FR[n_h] && + PATH_OFFSET_POSITION(tb->tb_path, n_path_offset) >= B_NR_ITEMS(bh), "PAP-8295: invalid position in the parent"); n_child_position = - (bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0; - n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position); + (bh == tb->FR[n_h]) ? 
tb->rkey[n_h] + 1 : 0; + n_son_number = B_N_CHILD_NUM(tb->FR[n_h], n_child_position); bh = sb_bread(sb, n_son_number); if (!bh) return IO_ERROR; - if (FILESYSTEM_CHANGED_TB(p_s_tb)) { + if (FILESYSTEM_CHANGED_TB(tb)) { brelse(bh); PROC_INFO_INC(sb, get_neighbors_restart[n_h]); return REPEAT_SEARCH; } - brelse(p_s_tb->R[n_h]); - p_s_tb->R[n_h] = bh; + brelse(tb->R[n_h]); + tb->R[n_h] = bh; RFALSE(!n_h && B_FREE_SPACE(bh) != MAX_CHILD_SIZE(bh) - - dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position)), + dc_size(B_N_CHILD(tb->FR[0], n_child_position)), "PAP-8300: invalid child size of right neighbor (%d != %d - %d)", B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh), - dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position))); + dc_size(B_N_CHILD(tb->FR[0], n_child_position))); } return CARRY_ON; @@ -2139,7 +2145,7 @@ static int clear_all_dirty_bits(struct super_block *s, struct buffer_head *bh) return reiserfs_prepare_for_journal(s, bh, 0); } -static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) +static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) { struct buffer_head *locked; #ifdef CONFIG_REISERFS_CHECK @@ -2151,95 +2157,94 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) locked = NULL; - for (i = p_s_tb->tb_path->path_length; + for (i = tb->tb_path->path_length; !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) { - if (PATH_OFFSET_PBUFFER(p_s_tb->tb_path, i)) { + if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) { /* if I understand correctly, we can only be sure the last buffer ** in the path is in the tree --clm */ #ifdef CONFIG_REISERFS_CHECK - if (PATH_PLAST_BUFFER(p_s_tb->tb_path) == - PATH_OFFSET_PBUFFER(p_s_tb->tb_path, i)) { - tb_buffer_sanity_check(p_s_tb->tb_sb, + if (PATH_PLAST_BUFFER(tb->tb_path) == + PATH_OFFSET_PBUFFER(tb->tb_path, i)) + tb_buffer_sanity_check(tb->tb_sb, PATH_OFFSET_PBUFFER - (p_s_tb->tb_path, + (tb->tb_path, i), "S", - p_s_tb->tb_path-> + tb->tb_path-> path_length - i); - } #endif - if (!clear_all_dirty_bits(p_s_tb->tb_sb, + if (!clear_all_dirty_bits(tb->tb_sb, PATH_OFFSET_PBUFFER - (p_s_tb->tb_path, + (tb->tb_path, i))) { locked = - PATH_OFFSET_PBUFFER(p_s_tb->tb_path, + PATH_OFFSET_PBUFFER(tb->tb_path, i); } } } - for (i = 0; !locked && i < MAX_HEIGHT && p_s_tb->insert_size[i]; + for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i]; i++) { - if (p_s_tb->lnum[i]) { + if (tb->lnum[i]) { - if (p_s_tb->L[i]) { - tb_buffer_sanity_check(p_s_tb->tb_sb, - p_s_tb->L[i], + if (tb->L[i]) { + tb_buffer_sanity_check(tb->tb_sb, + tb->L[i], "L", i); if (!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->L[i])) - locked = p_s_tb->L[i]; + (tb->tb_sb, tb->L[i])) + locked = tb->L[i]; } - if (!locked && p_s_tb->FL[i]) { - tb_buffer_sanity_check(p_s_tb->tb_sb, - p_s_tb->FL[i], + if (!locked && tb->FL[i]) { + tb_buffer_sanity_check(tb->tb_sb, + tb->FL[i], "FL", i); if (!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->FL[i])) - locked = p_s_tb->FL[i]; + (tb->tb_sb, tb->FL[i])) + locked = tb->FL[i]; } - if (!locked && p_s_tb->CFL[i]) { - tb_buffer_sanity_check(p_s_tb->tb_sb, - p_s_tb->CFL[i], + if (!locked && tb->CFL[i]) { + tb_buffer_sanity_check(tb->tb_sb, + tb->CFL[i], "CFL", i); if (!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->CFL[i])) - locked = p_s_tb->CFL[i]; + (tb->tb_sb, tb->CFL[i])) + locked = tb->CFL[i]; } } - if (!locked && (p_s_tb->rnum[i])) { + if (!locked && (tb->rnum[i])) { - if (p_s_tb->R[i]) { - tb_buffer_sanity_check(p_s_tb->tb_sb, - p_s_tb->R[i], + if (tb->R[i]) { + tb_buffer_sanity_check(tb->tb_sb, + tb->R[i], "R", i); if 
(!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->R[i])) - locked = p_s_tb->R[i]; + (tb->tb_sb, tb->R[i])) + locked = tb->R[i]; } - if (!locked && p_s_tb->FR[i]) { - tb_buffer_sanity_check(p_s_tb->tb_sb, - p_s_tb->FR[i], + if (!locked && tb->FR[i]) { + tb_buffer_sanity_check(tb->tb_sb, + tb->FR[i], "FR", i); if (!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->FR[i])) - locked = p_s_tb->FR[i]; + (tb->tb_sb, tb->FR[i])) + locked = tb->FR[i]; } - if (!locked && p_s_tb->CFR[i]) { - tb_buffer_sanity_check(p_s_tb->tb_sb, - p_s_tb->CFR[i], + if (!locked && tb->CFR[i]) { + tb_buffer_sanity_check(tb->tb_sb, + tb->CFR[i], "CFR", i); if (!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->CFR[i])) - locked = p_s_tb->CFR[i]; + (tb->tb_sb, tb->CFR[i])) + locked = tb->CFR[i]; } } } @@ -2252,10 +2257,10 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) ** --clm */ for (i = 0; !locked && i < MAX_FEB_SIZE; i++) { - if (p_s_tb->FEB[i]) { + if (tb->FEB[i]) { if (!clear_all_dirty_bits - (p_s_tb->tb_sb, p_s_tb->FEB[i])) - locked = p_s_tb->FEB[i]; + (tb->tb_sb, tb->FEB[i])) + locked = tb->FEB[i]; } } @@ -2263,21 +2268,20 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) #ifdef CONFIG_REISERFS_CHECK repeat_counter++; if ((repeat_counter % 10000) == 0) { - reiserfs_warning(p_s_tb->tb_sb, "reiserfs-8200", + reiserfs_warning(tb->tb_sb, "reiserfs-8200", "too many iterations waiting " "for buffer to unlock " "(%b)", locked); /* Don't loop forever. Try to recover from possible error. */ - return (FILESYSTEM_CHANGED_TB(p_s_tb)) ? + return (FILESYSTEM_CHANGED_TB(tb)) ? REPEAT_SEARCH : CARRY_ON; } #endif __wait_on_buffer(locked); - if (FILESYSTEM_CHANGED_TB(p_s_tb)) { + if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; - } } } while (locked); @@ -2307,138 +2311,136 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb) * tb tree_balance structure; * inum item number in S[h]; * pos_in_item - comment this if you can - * ins_ih & ins_sd are used when inserting + * ins_ih item head of item being inserted + * data inserted item or data to be pasted * Returns: 1 - schedule occurred while the function worked; * 0 - schedule didn't occur while the function worked; * -1 - if no_disk_space */ -int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted - const void *data // inserted item or data to be pasted - ) +int fix_nodes(int n_op_mode, struct tree_balance *tb, + struct item_head *p_s_ins_ih, const void *data) { - int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(p_s_tb->tb_path); + int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(tb->tb_path); int n_pos_in_item; /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared ** during wait_tb_buffers_run */ int wait_tb_buffers_run = 0; - struct buffer_head *p_s_tbS0 = PATH_PLAST_BUFFER(p_s_tb->tb_path); + struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); - ++REISERFS_SB(p_s_tb->tb_sb)->s_fix_nodes; + ++REISERFS_SB(tb->tb_sb)->s_fix_nodes; - n_pos_in_item = p_s_tb->tb_path->pos_in_item; + n_pos_in_item = tb->tb_path->pos_in_item; - p_s_tb->fs_gen = get_generation(p_s_tb->tb_sb); + tb->fs_gen = get_generation(tb->tb_sb); /* we prepare and log the super here so it will already be in the ** transaction when do_balance needs to change it. 
** This way do_balance won't have to schedule when trying to prepare ** the super for logging */ - reiserfs_prepare_for_journal(p_s_tb->tb_sb, - SB_BUFFER_WITH_SB(p_s_tb->tb_sb), 1); - journal_mark_dirty(p_s_tb->transaction_handle, p_s_tb->tb_sb, - SB_BUFFER_WITH_SB(p_s_tb->tb_sb)); - if (FILESYSTEM_CHANGED_TB(p_s_tb)) + reiserfs_prepare_for_journal(tb->tb_sb, + SB_BUFFER_WITH_SB(tb->tb_sb), 1); + journal_mark_dirty(tb->transaction_handle, tb->tb_sb, + SB_BUFFER_WITH_SB(tb->tb_sb)); + if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; /* if it possible in indirect_to_direct conversion */ - if (buffer_locked(p_s_tbS0)) { - __wait_on_buffer(p_s_tbS0); - if (FILESYSTEM_CHANGED_TB(p_s_tb)) + if (buffer_locked(tbS0)) { + __wait_on_buffer(tbS0); + if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; } #ifdef CONFIG_REISERFS_CHECK if (cur_tb) { print_cur_tb("fix_nodes"); - reiserfs_panic(p_s_tb->tb_sb, "PAP-8305", + reiserfs_panic(tb->tb_sb, "PAP-8305", "there is pending do_balance"); } - if (!buffer_uptodate(p_s_tbS0) || !B_IS_IN_TREE(p_s_tbS0)) { - reiserfs_panic(p_s_tb->tb_sb, "PAP-8320", "S[0] (%b %z) is " + if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0)) + reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is " "not uptodate at the beginning of fix_nodes " "or not in tree (mode %c)", - p_s_tbS0, p_s_tbS0, n_op_mode); - } + tbS0, tbS0, n_op_mode); /* Check parameters. */ switch (n_op_mode) { case M_INSERT: - if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(p_s_tbS0)) - reiserfs_panic(p_s_tb->tb_sb, "PAP-8330", "Incorrect " + if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(tbS0)) + reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect " "item number %d (in S0 - %d) in case " "of insert", n_item_num, - B_NR_ITEMS(p_s_tbS0)); + B_NR_ITEMS(tbS0)); break; case M_PASTE: case M_DELETE: case M_CUT: - if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(p_s_tbS0)) { - print_block(p_s_tbS0, 0, -1, -1); - reiserfs_panic(p_s_tb->tb_sb, "PAP-8335", "Incorrect " + if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(tbS0)) { + print_block(tbS0, 0, -1, -1); + reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect " "item number(%d); mode = %c " "insert_size = %d", n_item_num, n_op_mode, - p_s_tb->insert_size[0]); + tb->insert_size[0]); } break; default: - reiserfs_panic(p_s_tb->tb_sb, "PAP-8340", "Incorrect mode " + reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode " "of operation"); } #endif - if (get_mem_for_virtual_node(p_s_tb) == REPEAT_SEARCH) + if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH) // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat return REPEAT_SEARCH; /* Starting from the leaf level; for all levels n_h of the tree. */ - for (n_h = 0; n_h < MAX_HEIGHT && p_s_tb->insert_size[n_h]; n_h++) { - if ((n_ret_value = get_direct_parent(p_s_tb, n_h)) != CARRY_ON) { + for (n_h = 0; n_h < MAX_HEIGHT && tb->insert_size[n_h]; n_h++) { + n_ret_value = get_direct_parent(tb, n_h); + if (n_ret_value != CARRY_ON) goto repeat; - } - if ((n_ret_value = - check_balance(n_op_mode, p_s_tb, n_h, n_item_num, - n_pos_in_item, p_s_ins_ih, - data)) != CARRY_ON) { + n_ret_value = check_balance(n_op_mode, tb, n_h, n_item_num, + n_pos_in_item, p_s_ins_ih, data); + if (n_ret_value != CARRY_ON) { if (n_ret_value == NO_BALANCING_NEEDED) { /* No balancing for higher levels needed. 
*/ - if ((n_ret_value = - get_neighbors(p_s_tb, n_h)) != CARRY_ON) { + n_ret_value = get_neighbors(tb, n_h); + if (n_ret_value != CARRY_ON) goto repeat; - } if (n_h != MAX_HEIGHT - 1) - p_s_tb->insert_size[n_h + 1] = 0; + tb->insert_size[n_h + 1] = 0; /* ok, analysis and resource gathering are complete */ break; } goto repeat; } - if ((n_ret_value = get_neighbors(p_s_tb, n_h)) != CARRY_ON) { + n_ret_value = get_neighbors(tb, n_h); + if (n_ret_value != CARRY_ON) goto repeat; - } - if ((n_ret_value = get_empty_nodes(p_s_tb, n_h)) != CARRY_ON) { - goto repeat; /* No disk space, or schedule occurred and - analysis may be invalid and needs to be redone. */ - } + /* No disk space, or schedule occurred and analysis may be + * invalid and needs to be redone. */ + n_ret_value = get_empty_nodes(tb, n_h); + if (n_ret_value != CARRY_ON) + goto repeat; - if (!PATH_H_PBUFFER(p_s_tb->tb_path, n_h)) { + if (!PATH_H_PBUFFER(tb->tb_path, n_h)) { /* We have a positive insert size but no nodes exist on this level, this means that we are creating a new root. */ - RFALSE(p_s_tb->blknum[n_h] != 1, + RFALSE(tb->blknum[n_h] != 1, "PAP-8350: creating new empty root"); if (n_h < MAX_HEIGHT - 1) - p_s_tb->insert_size[n_h + 1] = 0; - } else if (!PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1)) { - if (p_s_tb->blknum[n_h] > 1) { + tb->insert_size[n_h + 1] = 0; + } else if (!PATH_H_PBUFFER(tb->tb_path, n_h + 1)) { + if (tb->blknum[n_h] > 1) { /* The tree needs to be grown, so this node S[n_h] which is the root node is split into two nodes, and a new node (S[n_h+1]) will be created to @@ -2447,19 +2449,20 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ RFALSE(n_h == MAX_HEIGHT - 1, "PAP-8355: attempt to create too high of a tree"); - p_s_tb->insert_size[n_h + 1] = + tb->insert_size[n_h + 1] = (DC_SIZE + - KEY_SIZE) * (p_s_tb->blknum[n_h] - 1) + + KEY_SIZE) * (tb->blknum[n_h] - 1) + DC_SIZE; } else if (n_h < MAX_HEIGHT - 1) - p_s_tb->insert_size[n_h + 1] = 0; + tb->insert_size[n_h + 1] = 0; } else - p_s_tb->insert_size[n_h + 1] = - (DC_SIZE + KEY_SIZE) * (p_s_tb->blknum[n_h] - 1); + tb->insert_size[n_h + 1] = + (DC_SIZE + KEY_SIZE) * (tb->blknum[n_h] - 1); } - if ((n_ret_value = wait_tb_buffers_until_unlocked(p_s_tb)) == CARRY_ON) { - if (FILESYSTEM_CHANGED_TB(p_s_tb)) { + n_ret_value = wait_tb_buffers_until_unlocked(tb); + if (n_ret_value == CARRY_ON) { + if (FILESYSTEM_CHANGED_TB(tb)) { wait_tb_buffers_run = 1; n_ret_value = REPEAT_SEARCH; goto repeat; @@ -2482,50 +2485,49 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ /* Release path buffers. 
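[Aside] The fix_nodes() hunks above repeatedly replace an assignment buried inside an if-condition with a separate assignment followed by a plain test. The two forms behave identically; reduced to a stand-alone dummy (step() and STEP_OK are placeholders, not part of the patch), the conversion looks like this:

        enum { STEP_OK, STEP_RETRY };

        static int step(void)
        {
                return STEP_OK;
        }

        /* old style: assignment hidden in the condition */
        static int old_style(void)
        {
                int ret;

                if ((ret = step()) != STEP_OK)
                        return ret;
                return STEP_OK;
        }

        /* new style, as the patch converts to: assign first, then test */
        static int new_style(void)
        {
                int ret;

                ret = step();
                if (ret != STEP_OK)
                        return ret;
                return STEP_OK;
        }
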
*/ if (wait_tb_buffers_run) { - pathrelse_and_restore(p_s_tb->tb_sb, p_s_tb->tb_path); + pathrelse_and_restore(tb->tb_sb, tb->tb_path); } else { - pathrelse(p_s_tb->tb_path); + pathrelse(tb->tb_path); } /* brelse all resources collected for balancing */ for (i = 0; i < MAX_HEIGHT; i++) { if (wait_tb_buffers_run) { - reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, - p_s_tb->L[i]); - reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, - p_s_tb->R[i]); - reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, - p_s_tb->FL[i]); - reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, - p_s_tb->FR[i]); - reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, - p_s_tb-> + reiserfs_restore_prepared_buffer(tb->tb_sb, + tb->L[i]); + reiserfs_restore_prepared_buffer(tb->tb_sb, + tb->R[i]); + reiserfs_restore_prepared_buffer(tb->tb_sb, + tb->FL[i]); + reiserfs_restore_prepared_buffer(tb->tb_sb, + tb->FR[i]); + reiserfs_restore_prepared_buffer(tb->tb_sb, + tb-> CFL[i]); - reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, - p_s_tb-> + reiserfs_restore_prepared_buffer(tb->tb_sb, + tb-> CFR[i]); } - brelse(p_s_tb->L[i]); - brelse(p_s_tb->R[i]); - brelse(p_s_tb->FL[i]); - brelse(p_s_tb->FR[i]); - brelse(p_s_tb->CFL[i]); - brelse(p_s_tb->CFR[i]); - - p_s_tb->L[i] = NULL; - p_s_tb->R[i] = NULL; - p_s_tb->FL[i] = NULL; - p_s_tb->FR[i] = NULL; - p_s_tb->CFL[i] = NULL; - p_s_tb->CFR[i] = NULL; + brelse(tb->L[i]); + brelse(tb->R[i]); + brelse(tb->FL[i]); + brelse(tb->FR[i]); + brelse(tb->CFL[i]); + brelse(tb->CFR[i]); + + tb->L[i] = NULL; + tb->R[i] = NULL; + tb->FL[i] = NULL; + tb->FR[i] = NULL; + tb->CFL[i] = NULL; + tb->CFR[i] = NULL; } if (wait_tb_buffers_run) { for (i = 0; i < MAX_FEB_SIZE; i++) { - if (p_s_tb->FEB[i]) { + if (tb->FEB[i]) reiserfs_restore_prepared_buffer - (p_s_tb->tb_sb, p_s_tb->FEB[i]); - } + (tb->tb_sb, tb->FEB[i]); } } return n_ret_value; @@ -2533,7 +2535,7 @@ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ } -/* Anatoly will probably forgive me renaming p_s_tb to tb. I just +/* Anatoly will probably forgive me renaming tb to tb. I just wanted to make lines shorter */ void unfix_nodes(struct tree_balance *tb) { diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 8f220fb777d7..5e867be559ea 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1063,17 +1063,17 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st } /* Calculate number of bytes which will be deleted or cut during balance */ -static int calc_deleted_bytes_number(struct tree_balance *p_s_tb, char c_mode) +static int calc_deleted_bytes_number(struct tree_balance *tb, char c_mode) { int n_del_size; - struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_tb->tb_path); + struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path); if (is_statdata_le_ih(p_le_ih)) return 0; n_del_size = (c_mode == - M_DELETE) ? ih_item_len(p_le_ih) : -p_s_tb->insert_size[0]; + M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0]; if (is_direntry_le_ih(p_le_ih)) { // return EMPTY_DIR_SIZE; /* We delete emty directoris only. 
*/ // we can't use EMPTY_DIR_SIZE, as old format dirs have a different @@ -1083,25 +1083,26 @@ static int calc_deleted_bytes_number(struct tree_balance *p_s_tb, char c_mode) } if (is_indirect_le_ih(p_le_ih)) - n_del_size = (n_del_size / UNFM_P_SIZE) * (PATH_PLAST_BUFFER(p_s_tb->tb_path)->b_size); // - get_ih_free_space (p_le_ih); + n_del_size = (n_del_size / UNFM_P_SIZE) * + (PATH_PLAST_BUFFER(tb->tb_path)->b_size); return n_del_size; } static void init_tb_struct(struct reiserfs_transaction_handle *th, - struct tree_balance *p_s_tb, + struct tree_balance *tb, struct super_block *sb, struct treepath *p_s_path, int n_size) { BUG_ON(!th->t_trans_id); - memset(p_s_tb, '\0', sizeof(struct tree_balance)); - p_s_tb->transaction_handle = th; - p_s_tb->tb_sb = sb; - p_s_tb->tb_path = p_s_path; + memset(tb, '\0', sizeof(struct tree_balance)); + tb->transaction_handle = th; + tb->tb_sb = sb; + tb->tb_path = p_s_path; PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL; PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0; - p_s_tb->insert_size[0] = n_size; + tb->insert_size[0] = n_size; } void padd_item(char *item, int total_length, int length) diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 3192dc793226..b72dc2095478 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -2004,7 +2004,7 @@ extern const struct address_space_operations reiserfs_address_space_operations; /* fix_nodes.c */ -int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, +int fix_nodes(int n_op_mode, struct tree_balance *tb, struct item_head *p_s_ins_ih, const void *); void unfix_nodes(struct tree_balance *); -- cgit v1.2.3-59-g8ed1b From d68caa9530a8ba54f97002e02bf6a0ad2462b8c0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:49 -0400 Subject: reiserfs: rename p_._ variables This patch is a simple s/p_._//g to the reiserfs code. This is the fifth in a series of patches to rip out some of the awful variable naming in reiserfs. Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/file.c | 6 +- fs/reiserfs/fix_node.c | 169 ++++++++------- fs/reiserfs/stree.c | 472 +++++++++++++++++++++--------------------- fs/reiserfs/tail_conversion.c | 28 +-- include/linux/reiserfs_fs.h | 46 ++-- 5 files changed, 365 insertions(+), 356 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index a73579f66214..cde16429ff00 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -134,10 +134,10 @@ static void reiserfs_vfs_truncate_file(struct inode *inode) * be removed... */ -static int reiserfs_sync_file(struct file *p_s_filp, - struct dentry *p_s_dentry, int datasync) +static int reiserfs_sync_file(struct file *filp, + struct dentry *dentry, int datasync) { - struct inode *inode = p_s_dentry->d_inode; + struct inode *inode = dentry->d_inode; int n_err; int barrier_done; diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 5236a8829e31..d97a55574ba9 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -780,9 +780,9 @@ static void free_buffers_in_tb(struct tree_balance *tb) /* The function is NOT SCHEDULE-SAFE! 
*/ static int get_empty_nodes(struct tree_balance *tb, int n_h) { - struct buffer_head *p_s_new_bh, - *p_s_Sh = PATH_H_PBUFFER(tb->tb_path, n_h); - b_blocknr_t *p_n_blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; + struct buffer_head *new_bh, + *Sh = PATH_H_PBUFFER(tb->tb_path, n_h); + b_blocknr_t *blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */ n_retval = CARRY_ON; struct super_block *sb = tb->tb_sb; @@ -810,8 +810,8 @@ static int get_empty_nodes(struct tree_balance *tb, int n_h) 1) : 0; /* Allocate missing empty blocks. */ - /* if p_s_Sh == 0 then we are getting a new root */ - n_amount_needed = (p_s_Sh) ? (tb->blknum[n_h] - 1) : 1; + /* if Sh == 0 then we are getting a new root */ + n_amount_needed = (Sh) ? (tb->blknum[n_h] - 1) : 1; /* Amount_needed = the amount that we need more than the amount that we have. */ if (n_amount_needed > n_number_of_freeblk) n_amount_needed -= n_number_of_freeblk; @@ -824,25 +824,25 @@ static int get_empty_nodes(struct tree_balance *tb, int n_h) return NO_DISK_SPACE; /* for each blocknumber we just got, get a buffer and stick it on FEB */ - for (p_n_blocknr = a_n_blocknrs, n_counter = 0; - n_counter < n_amount_needed; p_n_blocknr++, n_counter++) { + for (blocknr = a_n_blocknrs, n_counter = 0; + n_counter < n_amount_needed; blocknr++, n_counter++) { - RFALSE(!*p_n_blocknr, + RFALSE(!*blocknr, "PAP-8135: reiserfs_new_blocknrs failed when got new blocks"); - p_s_new_bh = sb_getblk(sb, *p_n_blocknr); - RFALSE(buffer_dirty(p_s_new_bh) || - buffer_journaled(p_s_new_bh) || - buffer_journal_dirty(p_s_new_bh), + new_bh = sb_getblk(sb, *blocknr); + RFALSE(buffer_dirty(new_bh) || + buffer_journaled(new_bh) || + buffer_journal_dirty(new_bh), "PAP-8140: journlaled or dirty buffer %b for the new block", - p_s_new_bh); + new_bh); /* Put empty buffers into the array. */ RFALSE(tb->FEB[tb->cur_blknum], "PAP-8141: busy slot for new buffer"); - set_buffer_journal_new(p_s_new_bh); - tb->FEB[tb->cur_blknum++] = p_s_new_bh; + set_buffer_journal_new(new_bh); + tb->FEB[tb->cur_blknum++] = new_bh; } if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb)) @@ -898,7 +898,7 @@ static int get_rfree(struct tree_balance *tb, int h) /* Check whether left neighbor is in memory. */ static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h) { - struct buffer_head *p_s_father, *left; + struct buffer_head *father, *left; struct super_block *sb = tb->tb_sb; b_blocknr_t n_left_neighbor_blocknr; int n_left_neighbor_position; @@ -908,18 +908,18 @@ static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h) return 0; /* Calculate father of the node to be balanced. */ - p_s_father = PATH_H_PBUFFER(tb->tb_path, n_h + 1); + father = PATH_H_PBUFFER(tb->tb_path, n_h + 1); - RFALSE(!p_s_father || - !B_IS_IN_TREE(p_s_father) || + RFALSE(!father || + !B_IS_IN_TREE(father) || !B_IS_IN_TREE(tb->FL[n_h]) || - !buffer_uptodate(p_s_father) || + !buffer_uptodate(father) || !buffer_uptodate(tb->FL[n_h]), "vs-8165: F[h] (%b) or FL[h] (%b) is invalid", - p_s_father, tb->FL[n_h]); + father, tb->FL[n_h]); /* Get position of the pointer to the left neighbor into the left father. */ - n_left_neighbor_position = (p_s_father == tb->FL[n_h]) ? + n_left_neighbor_position = (father == tb->FL[n_h]) ? tb->lkey[n_h] : B_NR_ITEMS(tb->FL[n_h]); /* Get left neighbor block number. 
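[Aside] The lookup is_left_neighbor_in_cache() performs a few lines further on relies on sb_find_get_block(), which only probes the buffer cache and never issues I/O, and whose reference must be dropped again. Reduced to its core; block_is_cached() is a hypothetical reduction, not code the patch adds:

        static int block_is_cached(struct super_block *sb, b_blocknr_t blocknr)
        {
                /* cache lookup only; no disk read is started */
                struct buffer_head *bh = sb_find_get_block(sb, blocknr);

                if (bh) {
                        brelse(bh);     /* drop the reference the lookup took */
                        return 1;
                }
                return 0;
        }
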
*/ n_left_neighbor_blocknr = @@ -940,10 +940,10 @@ static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h) #define LEFT_PARENTS 'l' #define RIGHT_PARENTS 'r' -static void decrement_key(struct cpu_key *p_s_key) +static void decrement_key(struct cpu_key *key) { // call item specific function for this key - item_ops[cpu_key_k_type(p_s_key)]->decrement_key(p_s_key); + item_ops[cpu_key_k_type(key)]->decrement_key(key); } /* Calculate far left/right parent of the left/right neighbor of the current node, that @@ -956,17 +956,17 @@ static void decrement_key(struct cpu_key *p_s_key) */ static int get_far_parent(struct tree_balance *tb, int n_h, - struct buffer_head **pp_s_father, - struct buffer_head **pp_s_com_father, char c_lr_par) + struct buffer_head **pfather, + struct buffer_head **pcom_father, char c_lr_par) { - struct buffer_head *p_s_parent; + struct buffer_head *parent; INITIALIZE_PATH(s_path_to_neighbor_father); - struct treepath *p_s_path = tb->tb_path; + struct treepath *path = tb->tb_path; struct cpu_key s_lr_father_key; int n_counter, n_position = INT_MAX, n_first_last_position = 0, - n_path_offset = PATH_H_PATH_OFFSET(p_s_path, n_h); + n_path_offset = PATH_H_PATH_OFFSET(path, n_h); /* Starting from F[n_h] go upwards in the tree, and look for the common ancestor of F[n_h], and its neighbor l/r, that should be obtained. */ @@ -979,25 +979,25 @@ static int get_far_parent(struct tree_balance *tb, for (; n_counter > FIRST_PATH_ELEMENT_OFFSET; n_counter--) { /* Check whether parent of the current buffer in the path is really parent in the tree. */ if (!B_IS_IN_TREE - (p_s_parent = PATH_OFFSET_PBUFFER(p_s_path, n_counter - 1))) + (parent = PATH_OFFSET_PBUFFER(path, n_counter - 1))) return REPEAT_SEARCH; /* Check whether position in the parent is correct. */ if ((n_position = - PATH_OFFSET_POSITION(p_s_path, + PATH_OFFSET_POSITION(path, n_counter - 1)) > - B_NR_ITEMS(p_s_parent)) + B_NR_ITEMS(parent)) return REPEAT_SEARCH; /* Check whether parent at the path really points to the child. */ - if (B_N_CHILD_NUM(p_s_parent, n_position) != - PATH_OFFSET_PBUFFER(p_s_path, n_counter)->b_blocknr) + if (B_N_CHILD_NUM(parent, n_position) != + PATH_OFFSET_PBUFFER(path, n_counter)->b_blocknr) return REPEAT_SEARCH; /* Return delimiting key if position in the parent is not equal to first/last one. */ if (c_lr_par == RIGHT_PARENTS) - n_first_last_position = B_NR_ITEMS(p_s_parent); + n_first_last_position = B_NR_ITEMS(parent); if (n_position != n_first_last_position) { - *pp_s_com_father = p_s_parent; - get_bh(*pp_s_com_father); - /*(*pp_s_com_father = p_s_parent)->b_count++; */ + *pcom_father = parent; + get_bh(*pcom_father); + /*(*pcom_father = parent)->b_count++; */ break; } } @@ -1009,22 +1009,22 @@ static int get_far_parent(struct tree_balance *tb, (tb->tb_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) { - *pp_s_father = *pp_s_com_father = NULL; + *pfather = *pcom_father = NULL; return CARRY_ON; } return REPEAT_SEARCH; } - RFALSE(B_LEVEL(*pp_s_com_father) <= DISK_LEAF_NODE_LEVEL, + RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL, "PAP-8185: (%b %z) level too small", - *pp_s_com_father, *pp_s_com_father); + *pcom_father, *pcom_father); /* Check whether the common parent is locked. 
*/ - if (buffer_locked(*pp_s_com_father)) { - __wait_on_buffer(*pp_s_com_father); + if (buffer_locked(*pcom_father)) { + __wait_on_buffer(*pcom_father); if (FILESYSTEM_CHANGED_TB(tb)) { - brelse(*pp_s_com_father); + brelse(*pcom_father); return REPEAT_SEARCH; } } @@ -1034,7 +1034,7 @@ static int get_far_parent(struct tree_balance *tb, /* Form key to get parent of the left/right neighbor. */ le_key2cpu_key(&s_lr_father_key, - B_N_PDELIM_KEY(*pp_s_com_father, + B_N_PDELIM_KEY(*pcom_father, (c_lr_par == LEFT_PARENTS) ? (tb->lkey[n_h - 1] = n_position - @@ -1053,14 +1053,14 @@ static int get_far_parent(struct tree_balance *tb, if (FILESYSTEM_CHANGED_TB(tb)) { pathrelse(&s_path_to_neighbor_father); - brelse(*pp_s_com_father); + brelse(*pcom_father); return REPEAT_SEARCH; } - *pp_s_father = PATH_PLAST_BUFFER(&s_path_to_neighbor_father); + *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father); - RFALSE(B_LEVEL(*pp_s_father) != n_h + 1, - "PAP-8190: (%b %z) level too small", *pp_s_father, *pp_s_father); + RFALSE(B_LEVEL(*pfather) != n_h + 1, + "PAP-8190: (%b %z) level too small", *pfather, *pfather); RFALSE(s_path_to_neighbor_father.path_length < FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small"); @@ -1078,11 +1078,11 @@ static int get_far_parent(struct tree_balance *tb, */ static int get_parents(struct tree_balance *tb, int n_h) { - struct treepath *p_s_path = tb->tb_path; + struct treepath *path = tb->tb_path; int n_position, n_ret_value, n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h); - struct buffer_head *p_s_curf, *p_s_curcf; + struct buffer_head *curf, *curcf; /* Current node is the root of the tree or will be root of the tree */ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) { @@ -1100,66 +1100,65 @@ static int get_parents(struct tree_balance *tb, int n_h) } /* Get parent FL[n_path_offset] of L[n_path_offset]. */ - if ((n_position = PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1))) { + n_position = PATH_OFFSET_POSITION(path, n_path_offset - 1); + if (n_position) { /* Current node is not the first child of its parent. */ - /*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2; */ - p_s_curf = p_s_curcf = - PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1); - get_bh(p_s_curf); - get_bh(p_s_curf); + curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); + curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); + get_bh(curf); + get_bh(curf); tb->lkey[n_h] = n_position - 1; } else { /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node. Calculate current common parent of L[n_path_offset] and the current node. Note that CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset]. Calculate lkey[n_path_offset]. */ - if ((n_ret_value = get_far_parent(tb, n_h + 1, &p_s_curf, - &p_s_curcf, + if ((n_ret_value = get_far_parent(tb, n_h + 1, &curf, + &curcf, LEFT_PARENTS)) != CARRY_ON) return n_ret_value; } brelse(tb->FL[n_h]); - tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */ + tb->FL[n_h] = curf; /* New initialization of FL[n_h]. */ brelse(tb->CFL[n_h]); - tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */ + tb->CFL[n_h] = curcf; /* New initialization of CFL[n_h]. 
*/ - RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) || - (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)), - "PAP-8195: FL (%b) or CFL (%b) is invalid", p_s_curf, p_s_curcf); + RFALSE((curf && !B_IS_IN_TREE(curf)) || + (curcf && !B_IS_IN_TREE(curcf)), + "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf); /* Get parent FR[n_h] of R[n_h]. */ /* Current node is the last child of F[n_h]. FR[n_h] != F[n_h]. */ - if (n_position == B_NR_ITEMS(PATH_H_PBUFFER(p_s_path, n_h + 1))) { + if (n_position == B_NR_ITEMS(PATH_H_PBUFFER(path, n_h + 1))) { /* Calculate current parent of R[n_h], which is the right neighbor of F[n_h]. Calculate current common parent of R[n_h] and current node. Note that CFR[n_h] not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */ if ((n_ret_value = - get_far_parent(tb, n_h + 1, &p_s_curf, &p_s_curcf, + get_far_parent(tb, n_h + 1, &curf, &curcf, RIGHT_PARENTS)) != CARRY_ON) return n_ret_value; } else { /* Current node is not the last child of its parent F[n_h]. */ - /*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2; */ - p_s_curf = p_s_curcf = - PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1); - get_bh(p_s_curf); - get_bh(p_s_curf); + curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); + curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); + get_bh(curf); + get_bh(curf); tb->rkey[n_h] = n_position; } brelse(tb->FR[n_h]); /* New initialization of FR[n_path_offset]. */ - tb->FR[n_h] = p_s_curf; + tb->FR[n_h] = curf; brelse(tb->CFR[n_h]); /* New initialization of CFR[n_path_offset]. */ - tb->CFR[n_h] = p_s_curcf; + tb->CFR[n_h] = curcf; - RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) || - (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)), - "PAP-8205: FR (%b) or CFR (%b) is invalid", p_s_curf, p_s_curcf); + RFALSE((curf && !B_IS_IN_TREE(curf)) || + (curcf && !B_IS_IN_TREE(curcf)), + "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf); return CARRY_ON; } @@ -1893,7 +1892,7 @@ static int check_balance(int mode, static int get_direct_parent(struct tree_balance *tb, int n_h) { struct buffer_head *bh; - struct treepath *p_s_path = tb->tb_path; + struct treepath *path = tb->tb_path; int n_position, n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h); @@ -1903,27 +1902,27 @@ static int get_direct_parent(struct tree_balance *tb, int n_h) RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET - 1, "PAP-8260: invalid offset in the path"); - if (PATH_OFFSET_PBUFFER(p_s_path, FIRST_PATH_ELEMENT_OFFSET)-> + if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) { /* Root is not changed. */ - PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1) = NULL; - PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1) = 0; + PATH_OFFSET_PBUFFER(path, n_path_offset - 1) = NULL; + PATH_OFFSET_POSITION(path, n_path_offset - 1) = 0; return CARRY_ON; } return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */ } if (!B_IS_IN_TREE - (bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))) + (bh = PATH_OFFSET_PBUFFER(path, n_path_offset - 1))) return REPEAT_SEARCH; /* Parent in the path is not in the tree. */ if ((n_position = - PATH_OFFSET_POSITION(p_s_path, + PATH_OFFSET_POSITION(path, n_path_offset - 1)) > B_NR_ITEMS(bh)) return REPEAT_SEARCH; if (B_N_CHILD_NUM(bh, n_position) != - PATH_OFFSET_PBUFFER(p_s_path, n_path_offset)->b_blocknr) + PATH_OFFSET_PBUFFER(path, n_path_offset)->b_blocknr) /* Parent in the path is not parent of the current node in the tree. 
*/ return REPEAT_SEARCH; @@ -2319,7 +2318,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) */ int fix_nodes(int n_op_mode, struct tree_balance *tb, - struct item_head *p_s_ins_ih, const void *data) + struct item_head *ins_ih, const void *data) { int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(tb->tb_path); int n_pos_in_item; @@ -2405,7 +2404,7 @@ int fix_nodes(int n_op_mode, struct tree_balance *tb, goto repeat; n_ret_value = check_balance(n_op_mode, tb, n_h, n_item_num, - n_pos_in_item, p_s_ins_ih, data); + n_pos_in_item, ins_ih, data); if (n_ret_value != CARRY_ON) { if (n_ret_value == NO_BALANCING_NEEDED) { /* No balancing for higher levels needed. */ diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 5e867be559ea..fd769c8dac32 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -68,10 +68,10 @@ inline int B_IS_IN_TREE(const struct buffer_head *bh) // // to gets item head in le form // -inline void copy_item_head(struct item_head *p_v_to, - const struct item_head *p_v_from) +inline void copy_item_head(struct item_head *to, + const struct item_head *from) { - memcpy(p_v_to, p_v_from, IH_SIZE); + memcpy(to, from, IH_SIZE); } /* k1 is pointer to on-disk structure which is stored in little-endian @@ -135,15 +135,15 @@ static inline int comp_keys(const struct reiserfs_key *le_key, inline int comp_short_le_keys(const struct reiserfs_key *key1, const struct reiserfs_key *key2) { - __u32 *p_s_1_u32, *p_s_2_u32; + __u32 *k1_u32, *k2_u32; int n_key_length = REISERFS_SHORT_KEY_LEN; - p_s_1_u32 = (__u32 *) key1; - p_s_2_u32 = (__u32 *) key2; - for (; n_key_length--; ++p_s_1_u32, ++p_s_2_u32) { - if (le32_to_cpu(*p_s_1_u32) < le32_to_cpu(*p_s_2_u32)) + k1_u32 = (__u32 *) key1; + k2_u32 = (__u32 *) key2; + for (; n_key_length--; ++k1_u32, ++k2_u32) { + if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32)) return -1; - if (le32_to_cpu(*p_s_1_u32) > le32_to_cpu(*p_s_2_u32)) + if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32)) return 1; } return 0; @@ -174,8 +174,8 @@ inline int comp_le_keys(const struct reiserfs_key *k1, * Binary search toolkit function * * Search for an item in the array by the item key * * Returns: 1 if found, 0 if not found; * - * *p_n_pos = number of the searched element if found, else the * - * number of the first element that is larger than p_v_key. * + * *pos = number of the searched element if found, else the * + * number of the first element that is larger than key. * **************************************************************************/ /* For those not familiar with binary search: n_lbound is the leftmost item that it could be, n_rbound the rightmost item that it could be. We examine the item @@ -184,28 +184,28 @@ inline int comp_le_keys(const struct reiserfs_key *k1, there are no possible items, and we have not found it. With each examination we cut the number of possible items it could be by one more than half rounded down, or we find it. */ -static inline int bin_search(const void *p_v_key, /* Key to search for. */ - const void *p_v_base, /* First item in the array. */ - int p_n_num, /* Number of items in the array. */ - int p_n_width, /* Item size in the array. - searched. Lest the reader be - confused, note that this is crafted - as a general function, and when it - is applied specifically to the array - of item headers in a node, p_n_width - is actually the item header size not - the item size. */ - int *p_n_pos /* Number of the searched for element. */ +static inline int bin_search(const void *key, /* Key to search for. 
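[Aside] The comment block above spells out the bin_search() contract: return 1 with *pos at the match, or return 0 with *pos at the first element greater than the key (the insertion point). A self-contained toy model of that contract over plain ints; the names here are illustrative only:

        static int bin_search_int(int key, const int *base, int num, int *pos)
        {
                int lbound = 0, rbound = num - 1, j;

                while (lbound <= rbound) {
                        j = (lbound + rbound) / 2;
                        if (base[j] < key) {
                                lbound = j + 1;
                        } else if (base[j] > key) {
                                rbound = j - 1;
                        } else {
                                *pos = j;
                                return 1;       /* found */
                        }
                }
                /* not found: *pos is where the key would have to be inserted */
                *pos = lbound;
                return 0;
        }
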
*/ + const void *base, /* First item in the array. */ + int num, /* Number of items in the array. */ + int width, /* Item size in the array. + searched. Lest the reader be + confused, note that this is crafted + as a general function, and when it + is applied specifically to the array + of item headers in a node, width + is actually the item header size not + the item size. */ + int *pos /* Number of the searched for element. */ ) { int n_rbound, n_lbound, n_j; - for (n_j = ((n_rbound = p_n_num - 1) + (n_lbound = 0)) / 2; + for (n_j = ((n_rbound = num - 1) + (n_lbound = 0)) / 2; n_lbound <= n_rbound; n_j = (n_rbound + n_lbound) / 2) switch (comp_keys - ((struct reiserfs_key *)((char *)p_v_base + - n_j * p_n_width), - (struct cpu_key *)p_v_key)) { + ((struct reiserfs_key *)((char *)base + + n_j * width), + (struct cpu_key *)key)) { case -1: n_lbound = n_j + 1; continue; @@ -213,13 +213,13 @@ static inline int bin_search(const void *p_v_key, /* Key to search for. n_rbound = n_j - 1; continue; case 0: - *p_n_pos = n_j; + *pos = n_j; return ITEM_FOUND; /* Key found in the array. */ } /* bin_search did not find given key, it returns position of key, that is minimal and greater than the given one. */ - *p_n_pos = n_lbound; + *pos = n_lbound; return ITEM_NOT_FOUND; } @@ -243,12 +243,12 @@ static const struct reiserfs_key MAX_KEY = { the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this case we return a special key, either MIN_KEY or MAX_KEY. */ static inline const struct reiserfs_key *get_lkey(const struct treepath - *p_s_chk_path, + *chk_path, const struct super_block *sb) { - int n_position, n_path_offset = p_s_chk_path->path_length; - struct buffer_head *p_s_parent; + int n_position, n_path_offset = chk_path->path_length; + struct buffer_head *parent; RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET, "PAP-5010: invalid offset in the path"); @@ -257,42 +257,42 @@ static inline const struct reiserfs_key *get_lkey(const struct treepath while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { RFALSE(!buffer_uptodate - (PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)), + (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)), "PAP-5020: parent is not uptodate"); /* Parent at the path is not in the tree now. */ if (!B_IS_IN_TREE - (p_s_parent = - PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset))) + (parent = + PATH_OFFSET_PBUFFER(chk_path, n_path_offset))) return &MAX_KEY; /* Check whether position in the parent is correct. */ if ((n_position = - PATH_OFFSET_POSITION(p_s_chk_path, + PATH_OFFSET_POSITION(chk_path, n_path_offset)) > - B_NR_ITEMS(p_s_parent)) + B_NR_ITEMS(parent)) return &MAX_KEY; /* Check whether parent at the path really points to the child. */ - if (B_N_CHILD_NUM(p_s_parent, n_position) != - PATH_OFFSET_PBUFFER(p_s_chk_path, + if (B_N_CHILD_NUM(parent, n_position) != + PATH_OFFSET_PBUFFER(chk_path, n_path_offset + 1)->b_blocknr) return &MAX_KEY; /* Return delimiting key if position in the parent is not equal to zero. */ if (n_position) - return B_N_PDELIM_KEY(p_s_parent, n_position - 1); + return B_N_PDELIM_KEY(parent, n_position - 1); } /* Return MIN_KEY if we are in the root of the buffer tree. */ - if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)-> + if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(sb)) return &MIN_KEY; return &MAX_KEY; } /* Get delimiting key of the buffer at the path and its right neighbor. 
*/ -inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, +inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path, const struct super_block *sb) { - int n_position, n_path_offset = p_s_chk_path->path_length; - struct buffer_head *p_s_parent; + int n_position, n_path_offset = chk_path->path_length; + struct buffer_head *parent; RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET, "PAP-5030: invalid offset in the path"); @@ -300,31 +300,31 @@ inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { RFALSE(!buffer_uptodate - (PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)), + (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)), "PAP-5040: parent is not uptodate"); /* Parent at the path is not in the tree now. */ if (!B_IS_IN_TREE - (p_s_parent = - PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset))) + (parent = + PATH_OFFSET_PBUFFER(chk_path, n_path_offset))) return &MIN_KEY; /* Check whether position in the parent is correct. */ if ((n_position = - PATH_OFFSET_POSITION(p_s_chk_path, + PATH_OFFSET_POSITION(chk_path, n_path_offset)) > - B_NR_ITEMS(p_s_parent)) + B_NR_ITEMS(parent)) return &MIN_KEY; /* Check whether parent at the path really points to the child. */ - if (B_N_CHILD_NUM(p_s_parent, n_position) != - PATH_OFFSET_PBUFFER(p_s_chk_path, + if (B_N_CHILD_NUM(parent, n_position) != + PATH_OFFSET_PBUFFER(chk_path, n_path_offset + 1)->b_blocknr) return &MIN_KEY; /* Return delimiting key if position in the parent is not the last one. */ - if (n_position != B_NR_ITEMS(p_s_parent)) - return B_N_PDELIM_KEY(p_s_parent, n_position); + if (n_position != B_NR_ITEMS(parent)) + return B_N_PDELIM_KEY(parent, n_position); } /* Return MAX_KEY if we are in the root of the buffer tree. */ - if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)-> + if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(sb)) return &MAX_KEY; return &MIN_KEY; @@ -335,25 +335,25 @@ inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, the path. These delimiting keys are stored at least one level above that buffer in the tree. If the buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */ -static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which should be checked. */ - const struct cpu_key *p_s_key, /* Key which should be checked. */ - struct super_block *sb /* Super block pointer. */ +static inline int key_in_buffer(struct treepath *chk_path, /* Path which should be checked. */ + const struct cpu_key *key, /* Key which should be checked. 
*/ + struct super_block *sb ) { - RFALSE(!p_s_key || p_s_chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET - || p_s_chk_path->path_length > MAX_HEIGHT, + RFALSE(!key || chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET + || chk_path->path_length > MAX_HEIGHT, "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)", - p_s_key, p_s_chk_path->path_length); - RFALSE(!PATH_PLAST_BUFFER(p_s_chk_path)->b_bdev, + key, chk_path->path_length); + RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev, "PAP-5060: device must not be NODEV"); - if (comp_keys(get_lkey(p_s_chk_path, sb), p_s_key) == 1) + if (comp_keys(get_lkey(chk_path, sb), key) == 1) /* left delimiting key is bigger, that the key we look for */ return 0; - // if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, sb)) != -1 ) - if (comp_keys(get_rkey(p_s_chk_path, sb), p_s_key) != 1) - /* p_s_key must be less than right delimitiing key */ + /* if ( comp_keys(key, get_rkey(chk_path, sb)) != -1 ) */ + if (comp_keys(get_rkey(chk_path, sb), key) != 1) + /* key must be less than right delimitiing key */ return 0; return 1; } @@ -369,34 +369,34 @@ int reiserfs_check_path(struct treepath *p) * dirty bits clean when preparing the buffer for the log. * This version should only be called from fix_nodes() */ void pathrelse_and_restore(struct super_block *sb, - struct treepath *p_s_search_path) + struct treepath *search_path) { - int n_path_offset = p_s_search_path->path_length; + int n_path_offset = search_path->path_length; RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, "clm-4000: invalid path offset"); while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) { struct buffer_head *bh; - bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--); + bh = PATH_OFFSET_PBUFFER(search_path, n_path_offset--); reiserfs_restore_prepared_buffer(sb, bh); brelse(bh); } - p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; + search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } /* Drop the reference to each buffer in a path */ -void pathrelse(struct treepath *p_s_search_path) +void pathrelse(struct treepath *search_path) { - int n_path_offset = p_s_search_path->path_length; + int n_path_offset = search_path->path_length; RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, "PAP-5090: invalid path offset"); while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) - brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--)); + brelse(PATH_OFFSET_PBUFFER(search_path, n_path_offset--)); - p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; + search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) @@ -547,9 +547,9 @@ static void search_by_key_reada(struct super_block *s, * Algorithm SearchByKey * * look for item in the Disk S+Tree by its key * * Input: sb - super block * - * p_s_key - pointer to the key to search * + * key - pointer to the key to search * * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR * - * p_s_search_path - path from the root to the needed leaf * + * search_path - path from the root to the needed leaf * **************************************************************************/ /* This function fills up the path from the root to the leaf as it @@ -566,8 +566,8 @@ static void search_by_key_reada(struct super_block *s, correctness of the top of the path but need not be checked for the correctness of the bottom of the path */ /* The function is NOT SCHEDULE-SAFE! */ -int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key to search. 
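[Aside] key_in_buffer() above boils down to one invariant: the key must be no smaller than the left delimiting key and strictly smaller than the right one. Restated as a stand-alone predicate built on comp_keys(); key_within_delimiters() is hypothetical and not part of the patch:

        static int key_within_delimiters(const struct reiserfs_key *lkey,
                                         const struct reiserfs_key *rkey,
                                         const struct cpu_key *key)
        {
                if (comp_keys(lkey, key) == 1)          /* lkey > key */
                        return 0;
                if (comp_keys(rkey, key) != 1)          /* rkey <= key */
                        return 0;
                return 1;
        }
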
*/ - struct treepath *p_s_search_path,/* This structure was +int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to search. */ + struct treepath *search_path,/* This structure was allocated and initialized by the calling function. It is filled up @@ -580,7 +580,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key b_blocknr_t n_block_number; int expected_level; struct buffer_head *bh; - struct path_element *p_s_last_element; + struct path_element *last_element; int n_node_level, n_retval; int right_neighbor_of_leaf_node; int fs_gen; @@ -598,7 +598,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key we must be careful to release all nodes in a path before we either discard the path struct or re-use the path struct, as we do here. */ - pathrelse(p_s_search_path); + pathrelse(search_path); right_neighbor_of_leaf_node = 0; @@ -615,18 +615,18 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key "%s: there were %d iterations of " "while loop looking for key %K", current->comm, n_repeat_counter, - p_s_key); + key); #endif /* prep path to have another element added to it. */ - p_s_last_element = - PATH_OFFSET_PELEMENT(p_s_search_path, - ++p_s_search_path->path_length); + last_element = + PATH_OFFSET_PELEMENT(search_path, + ++search_path->path_length); fs_gen = get_generation(sb); /* Read the next tree node, and set the last element in the path to have a pointer to it. */ - if ((bh = p_s_last_element->pe_buffer = + if ((bh = last_element->pe_buffer = sb_getblk(sb, n_block_number))) { if (!buffer_uptodate(bh) && reada_count > 1) search_by_key_reada(sb, reada_bh, @@ -637,8 +637,8 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key goto io_error; } else { io_error: - p_s_search_path->path_length--; - pathrelse(p_s_search_path); + search_path->path_length--; + pathrelse(search_path); return IO_ERROR; } reada_count = 0; @@ -652,12 +652,12 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key if (fs_changed(fs_gen, sb) && (!B_IS_IN_TREE(bh) || B_LEVEL(bh) != expected_level || - !key_in_buffer(p_s_search_path, p_s_key, sb))) { + !key_in_buffer(search_path, key, sb))) { PROC_INFO_INC(sb, search_by_key_fs_changed); PROC_INFO_INC(sb, search_by_key_restarted); PROC_INFO_INC(sb, sbk_restarted[expected_level - 1]); - pathrelse(p_s_search_path); + pathrelse(search_path); /* Get the root block number so that we can repeat the search starting from the root. */ @@ -669,11 +669,11 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key continue; } - /* only check that the key is in the buffer if p_s_key is not + /* only check that the key is in the buffer if key is not equal to the MAX_KEY. Latter case is only possible in "finish_unfinished()" processing during mount. */ - RFALSE(comp_keys(&MAX_KEY, p_s_key) && - !key_in_buffer(p_s_search_path, p_s_key, sb), + RFALSE(comp_keys(&MAX_KEY, key) && + !key_in_buffer(search_path, key, sb), "PAP-5130: key is not in the buffer"); #ifdef CONFIG_REISERFS_CHECK if (cur_tb) { @@ -689,7 +689,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key reiserfs_error(sb, "vs-5150", "invalid format found in block %ld. 
" "Fsck?", bh->b_blocknr); - pathrelse(p_s_search_path); + pathrelse(search_path); return IO_ERROR; } @@ -702,12 +702,12 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key "vs-5152: tree level (%d) is less than stop level (%d)", n_node_level, n_stop_level); - n_retval = bin_search(p_s_key, B_N_PITEM_HEAD(bh, 0), + n_retval = bin_search(key, B_N_PITEM_HEAD(bh, 0), B_NR_ITEMS(bh), (n_node_level == DISK_LEAF_NODE_LEVEL) ? IH_SIZE : KEY_SIZE, - &(p_s_last_element->pe_position)); + &(last_element->pe_position)); if (n_node_level == n_stop_level) { return n_retval; } @@ -715,7 +715,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key /* we are not in the stop level */ if (n_retval == ITEM_FOUND) /* item has been found, so we choose the pointer which is to the right of the found one */ - p_s_last_element->pe_position++; + last_element->pe_position++; /* if item was not found we choose the position which is to the left of the found item. This requires no code, @@ -725,23 +725,23 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key an internal node. Now we calculate child block number by position in the node. */ n_block_number = - B_N_CHILD_NUM(bh, p_s_last_element->pe_position); + B_N_CHILD_NUM(bh, last_element->pe_position); /* if we are going to read leaf nodes, try for read ahead as well */ - if ((p_s_search_path->reada & PATH_READA) && + if ((search_path->reada & PATH_READA) && n_node_level == DISK_LEAF_NODE_LEVEL + 1) { - int pos = p_s_last_element->pe_position; + int pos = last_element->pe_position; int limit = B_NR_ITEMS(bh); struct reiserfs_key *le_key; - if (p_s_search_path->reada & PATH_READA_BACK) + if (search_path->reada & PATH_READA_BACK) limit = 0; while (reada_count < SEARCH_BY_KEY_READA) { if (pos == limit) break; reada_blocks[reada_count++] = B_N_CHILD_NUM(bh, pos); - if (p_s_search_path->reada & PATH_READA_BACK) + if (search_path->reada & PATH_READA_BACK) pos--; else pos++; @@ -751,7 +751,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key */ le_key = B_N_PDELIM_KEY(bh, pos); if (le32_to_cpu(le_key->k_objectid) != - p_s_key->on_disk_key.k_objectid) { + key->on_disk_key.k_objectid) { break; } } @@ -760,11 +760,11 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key } /* Form the path to an item and position in this item which contains - file byte defined by p_s_key. If there is no such item + file byte defined by key. If there is no such item corresponding to the key, we point the path to the item with - maximal key less than p_s_key, and *p_n_pos_in_item is set to one + maximal key less than key, and *pos_in_item is set to one past the last entry/byte in the item. If searching for entry in a - directory item, and it is not found, *p_n_pos_in_item is set to one + directory item, and it is not found, *pos_in_item is set to one entry more than the entry with maximal key which is less than the sought key. @@ -777,7 +777,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key /* The function is NOT SCHEDULE-SAFE! */ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super block. */ const struct cpu_key *p_cpu_key, /* Key to search (cpu variable) */ - struct treepath *p_s_search_path /* Filled up by this function. */ + struct treepath *search_path /* Filled up by this function. 
*/ ) { struct item_head *p_le_ih; /* pointer to on-disk structure */ @@ -788,34 +788,34 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b /* If searching for directory entry. */ if (is_direntry_cpu_key(p_cpu_key)) - return search_by_entry_key(sb, p_cpu_key, p_s_search_path, + return search_by_entry_key(sb, p_cpu_key, search_path, &de); /* If not searching for directory entry. */ /* If item is found. */ - retval = search_item(sb, p_cpu_key, p_s_search_path); + retval = search_item(sb, p_cpu_key, search_path); if (retval == IO_ERROR) return retval; if (retval == ITEM_FOUND) { RFALSE(!ih_item_len (B_N_PITEM_HEAD - (PATH_PLAST_BUFFER(p_s_search_path), - PATH_LAST_POSITION(p_s_search_path))), + (PATH_PLAST_BUFFER(search_path), + PATH_LAST_POSITION(search_path))), "PAP-5165: item length equals zero"); - pos_in_item(p_s_search_path) = 0; + pos_in_item(search_path) = 0; return POSITION_FOUND; } - RFALSE(!PATH_LAST_POSITION(p_s_search_path), + RFALSE(!PATH_LAST_POSITION(search_path), "PAP-5170: position equals zero"); /* Item is not found. Set path to the previous item. */ p_le_ih = - B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path), - --PATH_LAST_POSITION(p_s_search_path)); + B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path), + --PATH_LAST_POSITION(search_path)); n_blk_size = sb->s_blocksize; if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) { @@ -829,9 +829,9 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b /* Needed byte is contained in the item pointed to by the path. */ if (item_offset <= offset && item_offset + op_bytes_number(p_le_ih, n_blk_size) > offset) { - pos_in_item(p_s_search_path) = offset - item_offset; + pos_in_item(search_path) = offset - item_offset; if (is_indirect_le_ih(p_le_ih)) { - pos_in_item(p_s_search_path) /= n_blk_size; + pos_in_item(search_path) /= n_blk_size; } return POSITION_FOUND; } @@ -839,18 +839,18 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b /* Needed byte is not contained in the item pointed to by the path. Set pos_in_item out of the item. */ if (is_indirect_le_ih(p_le_ih)) - pos_in_item(p_s_search_path) = + pos_in_item(search_path) = ih_item_len(p_le_ih) / UNFM_P_SIZE; else - pos_in_item(p_s_search_path) = ih_item_len(p_le_ih); + pos_in_item(search_path) = ih_item_len(p_le_ih); return POSITION_NOT_FOUND; } /* Compare given item and item pointed to by the path. */ -int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path) +int comp_items(const struct item_head *stored_ih, const struct treepath *path) { - struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path); + struct buffer_head *bh = PATH_PLAST_BUFFER(path); struct item_head *ih; /* Last buffer at the path is not in the tree. */ @@ -858,11 +858,11 @@ int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_pat return 1; /* Last path position is invalid. */ - if (PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(bh)) + if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh)) return 1; /* we need only to know, whether it is the same item */ - ih = get_ih(p_s_path); + ih = get_ih(path); return memcmp(stored_ih, ih, IH_SIZE); } @@ -951,14 +951,14 @@ static inline int prepare_for_direntry_item(struct treepath *path, In case of file truncate calculate whether this item must be deleted/truncated or last unformatted node of this item will be converted to a direct item. This function returns a determination of what balance mode the calling function should employ. 
*/ -static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *p_s_path, const struct cpu_key *p_s_item_key, int *p_n_removed, /* Number of unformatted nodes which were removed +static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed, /* Number of unformatted nodes which were removed from end of the file. */ - int *p_n_cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */ + int *cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */ ) { struct super_block *sb = inode->i_sb; - struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_path); - struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path); + struct item_head *p_le_ih = PATH_PITEM_HEAD(path); + struct buffer_head *bh = PATH_PLAST_BUFFER(path); BUG_ON(!th->t_trans_id); @@ -968,20 +968,20 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st RFALSE(n_new_file_length != max_reiserfs_offset(inode), "PAP-5210: mode must be M_DELETE"); - *p_n_cut_size = -(IH_SIZE + ih_item_len(p_le_ih)); + *cut_size = -(IH_SIZE + ih_item_len(p_le_ih)); return M_DELETE; } /* Directory item. */ if (is_direntry_le_ih(p_le_ih)) - return prepare_for_direntry_item(p_s_path, p_le_ih, inode, + return prepare_for_direntry_item(path, p_le_ih, inode, n_new_file_length, - p_n_cut_size); + cut_size); /* Direct item. */ if (is_direct_le_ih(p_le_ih)) - return prepare_for_direct_item(p_s_path, p_le_ih, inode, - n_new_file_length, p_n_cut_size); + return prepare_for_direct_item(path, p_le_ih, inode, + n_new_file_length, cut_size); /* Case of an indirect item. */ { @@ -1001,9 +1001,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st do { need_re_search = 0; - *p_n_cut_size = 0; - bh = PATH_PLAST_BUFFER(p_s_path); - copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); + *cut_size = 0; + bh = PATH_PLAST_BUFFER(path); + copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); pos = I_UNFM_NUM(&s_ih); while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) { @@ -1013,10 +1013,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st /* Each unformatted block deletion may involve one additional * bitmap block into the transaction, thereby the initial * journal space reservation might not be enough. 
*/ - if (!delete && (*p_n_cut_size) != 0 && - reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) { + if (!delete && (*cut_size) != 0 && + reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) break; - } unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1; block = get_block_num(unfm, 0); @@ -1030,17 +1029,17 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st cond_resched(); - if (item_moved (&s_ih, p_s_path)) { + if (item_moved (&s_ih, path)) { need_re_search = 1; break; } pos --; - (*p_n_removed) ++; - (*p_n_cut_size) -= UNFM_P_SIZE; + (*removed)++; + (*cut_size) -= UNFM_P_SIZE; if (pos == 0) { - (*p_n_cut_size) -= IH_SIZE; + (*cut_size) -= IH_SIZE; result = M_DELETE; break; } @@ -1050,10 +1049,10 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st ** buffer */ reiserfs_restore_prepared_buffer(sb, bh); } while (need_re_search && - search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_FOUND); - pos_in_item(p_s_path) = pos * UNFM_P_SIZE; + search_for_position_by_key(sb, item_key, path) == POSITION_FOUND); + pos_in_item(path) = pos * UNFM_P_SIZE; - if (*p_n_cut_size == 0) { + if (*cut_size == 0) { /* Nothing were cut. maybe convert last unformatted node to the * direct item? */ result = M_CONVERT; @@ -1091,7 +1090,7 @@ static int calc_deleted_bytes_number(struct tree_balance *tb, char c_mode) static void init_tb_struct(struct reiserfs_transaction_handle *th, struct tree_balance *tb, struct super_block *sb, - struct treepath *p_s_path, int n_size) + struct treepath *path, int n_size) { BUG_ON(!th->t_trans_id); @@ -1099,9 +1098,9 @@ static void init_tb_struct(struct reiserfs_transaction_handle *th, memset(tb, '\0', sizeof(struct tree_balance)); tb->transaction_handle = th; tb->tb_sb = sb; - tb->tb_path = p_s_path; - PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL; - PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0; + tb->tb_path = path; + PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL; + PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0; tb->insert_size[0] = n_size; } @@ -1141,13 +1140,17 @@ char head2type(struct item_head *ih) } #endif -/* Delete object item. */ -int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_path, /* Path to the deleted item. */ - const struct cpu_key *p_s_item_key, /* Key to search for the deleted item. */ - struct inode *inode, /* inode is here just to update - * i_blocks and quotas */ - struct buffer_head *p_s_un_bh) -{ /* NULL or unformatted node pointer. */ +/* Delete object item. 
+ * th - active transaction handle + * path - path to the deleted item + * item_key - key to search for the deleted item + * indode - used for updating i_blocks and quotas + * un_bh - NULL or unformatted node pointer + */ +int reiserfs_delete_item(struct reiserfs_transaction_handle *th, + struct treepath *path, const struct cpu_key *item_key, + struct inode *inode, struct buffer_head *un_bh) +{ struct super_block *sb = inode->i_sb; struct tree_balance s_del_balance; struct item_head s_ih; @@ -1162,7 +1165,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath BUG_ON(!th->t_trans_id); - init_tb_struct(th, &s_del_balance, sb, p_s_path, + init_tb_struct(th, &s_del_balance, sb, path, 0 /*size is unknown */ ); while (1) { @@ -1172,14 +1175,14 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath n_iter++; c_mode = #endif - prepare_for_delete_or_cut(th, inode, p_s_path, - p_s_item_key, &n_removed, + prepare_for_delete_or_cut(th, inode, path, + item_key, &n_removed, &n_del_size, max_reiserfs_offset(inode)); RFALSE(c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE"); - copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); + copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); s_del_balance.insert_size[0] = n_del_size; n_ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL); @@ -1190,13 +1193,13 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath // file system changed, repeat search n_ret_value = - search_for_position_by_key(sb, p_s_item_key, p_s_path); + search_for_position_by_key(sb, item_key, path); if (n_ret_value == IO_ERROR) break; if (n_ret_value == FILE_NOT_FOUND) { reiserfs_warning(sb, "vs-5340", "no items of the file %K found", - p_s_item_key); + item_key); break; } } /* while (1) */ @@ -1207,7 +1210,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath } // reiserfs_delete_item returns item length when success n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE); - q_ih = get_ih(p_s_path); + q_ih = get_ih(path); quota_cut_bytes = ih_item_len(q_ih); /* hack so the quota code doesn't have to guess if the file @@ -1224,7 +1227,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath } } - if (p_s_un_bh) { + if (un_bh) { int off; char *data; @@ -1242,16 +1245,16 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath ** The unformatted node must be dirtied later on. We can't be ** sure here if the entire tail has been deleted yet. ** - ** p_s_un_bh is from the page cache (all unformatted nodes are + ** un_bh is from the page cache (all unformatted nodes are ** from the page cache) and might be a highmem page. So, we - ** can't use p_s_un_bh->b_data. + ** can't use un_bh->b_data. 
** -clm */ - data = kmap_atomic(p_s_un_bh->b_page, KM_USER0); + data = kmap_atomic(un_bh->b_page, KM_USER0); off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); memcpy(data + off, - B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), + B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih), n_ret_value); kunmap_atomic(data, KM_USER0); } @@ -1427,9 +1430,9 @@ static void unmap_buffers(struct page *page, loff_t pos) static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, struct inode *inode, struct page *page, - struct treepath *p_s_path, - const struct cpu_key *p_s_item_key, - loff_t n_new_file_size, char *p_c_mode) + struct treepath *path, + const struct cpu_key *item_key, + loff_t n_new_file_size, char *mode) { struct super_block *sb = inode->i_sb; int n_block_size = sb->s_blocksize; @@ -1445,17 +1448,17 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, !tail_has_to_be_packed(inode) || !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) { /* leave tail in an unformatted node */ - *p_c_mode = M_SKIP_BALANCING; + *mode = M_SKIP_BALANCING; cut_bytes = n_block_size - (n_new_file_size & (n_block_size - 1)); - pathrelse(p_s_path); + pathrelse(path); return cut_bytes; } - /* Permorm the conversion to a direct_item. */ - /* return indirect_to_direct(inode, p_s_path, p_s_item_key, - n_new_file_size, p_c_mode); */ - return indirect2direct(th, inode, page, p_s_path, p_s_item_key, - n_new_file_size, p_c_mode); + /* Perform the conversion to a direct_item. */ + /* return indirect_to_direct(inode, path, item_key, + n_new_file_size, mode); */ + return indirect2direct(th, inode, page, path, item_key, + n_new_file_size, mode); } /* we did indirect_to_direct conversion. And we have inserted direct @@ -1506,8 +1509,8 @@ static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th, /* (Truncate or cut entry) or delete object item. Returns < 0 on failure */ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, - struct treepath *p_s_path, - struct cpu_key *p_s_item_key, + struct treepath *path, + struct cpu_key *item_key, struct inode *inode, struct page *page, loff_t n_new_file_size) { @@ -1528,7 +1531,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); - init_tb_struct(th, &s_cut_balance, inode->i_sb, p_s_path, + init_tb_struct(th, &s_cut_balance, inode->i_sb, path, n_cut_size); /* Repeat this loop until we either cut the item without needing @@ -1540,8 +1543,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, pointers. */ c_mode = - prepare_for_delete_or_cut(th, inode, p_s_path, - p_s_item_key, &n_removed, + prepare_for_delete_or_cut(th, inode, path, + item_key, &n_removed, &n_cut_size, n_new_file_size); if (c_mode == M_CONVERT) { /* convert last unformatted node to direct item or leave @@ -1551,7 +1554,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, n_ret_value = maybe_indirect_to_direct(th, inode, page, - p_s_path, p_s_item_key, + path, item_key, n_new_file_size, &c_mode); if (c_mode == M_SKIP_BALANCING) /* tail has been left in the unformatted node */ @@ -1568,26 +1571,26 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, inserting the new direct item. Now we are removing the last unformatted node pointer. Set key to search for it. 
*/ - set_cpu_key_k_type(p_s_item_key, TYPE_INDIRECT); - p_s_item_key->key_length = 4; + set_cpu_key_k_type(item_key, TYPE_INDIRECT); + item_key->key_length = 4; n_new_file_size -= (n_new_file_size & (sb->s_blocksize - 1)); tail_pos = n_new_file_size; - set_cpu_key_k_offset(p_s_item_key, n_new_file_size + 1); + set_cpu_key_k_offset(item_key, n_new_file_size + 1); if (search_for_position_by_key - (sb, p_s_item_key, - p_s_path) == POSITION_NOT_FOUND) { - print_block(PATH_PLAST_BUFFER(p_s_path), 3, - PATH_LAST_POSITION(p_s_path) - 1, - PATH_LAST_POSITION(p_s_path) + 1); + (sb, item_key, + path) == POSITION_NOT_FOUND) { + print_block(PATH_PLAST_BUFFER(path), 3, + PATH_LAST_POSITION(path) - 1, + PATH_LAST_POSITION(path) + 1); reiserfs_panic(sb, "PAP-5580", "item to " "convert does not exist (%K)", - p_s_item_key); + item_key); } continue; } if (n_cut_size == 0) { - pathrelse(p_s_path); + pathrelse(path); return 0; } @@ -1600,12 +1603,12 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, PROC_INFO_INC(sb, cut_from_item_restarted); n_ret_value = - search_for_position_by_key(sb, p_s_item_key, p_s_path); + search_for_position_by_key(sb, item_key, path); if (n_ret_value == POSITION_FOUND) continue; reiserfs_warning(sb, "PAP-5610", "item %K not found", - p_s_item_key); + item_key); unfix_nodes(&s_cut_balance); return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT; } /* while */ @@ -1615,7 +1618,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, if (n_is_inode_locked) { // FIXME: this seems to be not needed: we are always able // to cut item - indirect_to_direct_roll_back(th, inode, p_s_path); + indirect_to_direct_roll_back(th, inode, path); } if (n_ret_value == NO_DISK_SPACE) reiserfs_warning(sb, "reiserfs-5092", @@ -1631,7 +1634,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, /* Calculate number of bytes that need to be cut from the item. */ quota_cut_bytes = (c_mode == - M_DELETE) ? ih_item_len(get_ih(p_s_path)) : -s_cut_balance. + M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance. insert_size[0]; if (retval2 == -1) n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode); @@ -1878,7 +1881,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, #ifdef CONFIG_REISERFS_CHECK // this makes sure, that we __append__, not overwrite or add holes static void check_research_for_paste(struct treepath *path, - const struct cpu_key *p_s_key) + const struct cpu_key *key) { struct item_head *found_ih = get_ih(path); @@ -1886,35 +1889,35 @@ static void check_research_for_paste(struct treepath *path, if (le_ih_k_offset(found_ih) + op_bytes_number(found_ih, get_last_bh(path)->b_size) != - cpu_key_k_offset(p_s_key) + cpu_key_k_offset(key) || op_bytes_number(found_ih, get_last_bh(path)->b_size) != pos_in_item(path)) reiserfs_panic(NULL, "PAP-5720", "found direct item " "%h or position (%d) does not match " "to key %K", found_ih, - pos_in_item(path), p_s_key); + pos_in_item(path), key); } if (is_indirect_le_ih(found_ih)) { if (le_ih_k_offset(found_ih) + op_bytes_number(found_ih, get_last_bh(path)->b_size) != - cpu_key_k_offset(p_s_key) + cpu_key_k_offset(key) || I_UNFM_NUM(found_ih) != pos_in_item(path) || get_ih_free_space(found_ih) != 0) reiserfs_panic(NULL, "PAP-5730", "found indirect " "item (%h) or position (%d) does not " "match to key (%K)", - found_ih, pos_in_item(path), p_s_key); + found_ih, pos_in_item(path), key); } } #endif /* config reiserfs check */ /* Paste bytes to the existing item. 
Returns bytes number pasted into the item. */ -int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_search_path, /* Path to the pasted item. */ - const struct cpu_key *p_s_key, /* Key to search for the needed item. */ +int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *search_path, /* Path to the pasted item. */ + const struct cpu_key *key, /* Key to search for the needed item. */ struct inode *inode, /* Inode item belongs to */ - const char *p_c_body, /* Pointer to the bytes to paste. */ + const char *body, /* Pointer to the bytes to paste. */ int n_pasted_size) { /* Size of pasted bytes. */ struct tree_balance s_paste_balance; @@ -1929,17 +1932,17 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): allocating %u id=%u type=%c", n_pasted_size, inode->i_uid, - key2type(&(p_s_key->on_disk_key))); + key2type(&(key->on_disk_key))); #endif if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) { - pathrelse(p_s_search_path); + pathrelse(search_path); return -EDQUOT; } - init_tb_struct(th, &s_paste_balance, th->t_super, p_s_search_path, + init_tb_struct(th, &s_paste_balance, th->t_super, search_path, n_pasted_size); #ifdef DISPLACE_NEW_PACKING_LOCALITIES - s_paste_balance.key = p_s_key->on_disk_key; + s_paste_balance.key = key->on_disk_key; #endif /* DQUOT_* can schedule, must check before the fix_nodes */ @@ -1949,13 +1952,13 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree while ((retval = fix_nodes(M_PASTE, &s_paste_balance, NULL, - p_c_body)) == REPEAT_SEARCH) { + body)) == REPEAT_SEARCH) { search_again: /* file system changed while we were in the fix_nodes */ PROC_INFO_INC(th->t_super, paste_into_item_restarted); retval = - search_for_position_by_key(th->t_super, p_s_key, - p_s_search_path); + search_for_position_by_key(th->t_super, key, + search_path); if (retval == IO_ERROR) { retval = -EIO; goto error_out; @@ -1963,19 +1966,19 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree if (retval == POSITION_FOUND) { reiserfs_warning(inode->i_sb, "PAP-5710", "entry or pasted byte (%K) exists", - p_s_key); + key); retval = -EEXIST; goto error_out; } #ifdef CONFIG_REISERFS_CHECK - check_research_for_paste(p_s_search_path, p_s_key); + check_research_for_paste(search_path, key); #endif } /* Perform balancing after all resources are collected by fix_nodes, and accessing them will not risk triggering schedule. */ if (retval == CARRY_ON) { - do_balance(&s_paste_balance, NULL /*ih */ , p_c_body, M_PASTE); + do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE); return 0; } retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO; @@ -1986,17 +1989,23 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): freeing %u id=%u type=%c", n_pasted_size, inode->i_uid, - key2type(&(p_s_key->on_disk_key))); + key2type(&(key->on_disk_key))); #endif DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size); return retval; } -/* Insert new item into the buffer at the path. */ -int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_path, /* Path to the inserteded item. */ - const struct cpu_key *key, struct item_head *p_s_ih, /* Pointer to the item header to insert. 
*/ - struct inode *inode, const char *p_c_body) -{ /* Pointer to the bytes to insert. */ +/* Insert new item into the buffer at the path. + * th - active transaction handle + * path - path to the inserted item + * ih - pointer to the item header to insert + * body - pointer to the bytes to insert + */ +int reiserfs_insert_item(struct reiserfs_transaction_handle *th, + struct treepath *path, const struct cpu_key *key, + struct item_head *ih, struct inode *inode, + const char *body) +{ struct tree_balance s_ins_balance; int retval; int fs_gen = 0; @@ -2006,28 +2015,27 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath if (inode) { /* Do we count quotas for item? */ fs_gen = get_generation(inode->i_sb); - quota_bytes = ih_item_len(p_s_ih); + quota_bytes = ih_item_len(ih); /* hack so the quota code doesn't have to guess if the file has ** a tail, links are always tails, so there's no guessing needed */ - if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_s_ih)) { + if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih)) quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE; - } #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota insert_item(): allocating %u id=%u type=%c", - quota_bytes, inode->i_uid, head2type(p_s_ih)); + quota_bytes, inode->i_uid, head2type(ih)); #endif /* We can't dirty inode here. It would be immediately written but * appropriate stat item isn't inserted yet... */ if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) { - pathrelse(p_s_path); + pathrelse(path); return -EDQUOT; } } - init_tb_struct(th, &s_ins_balance, th->t_super, p_s_path, - IH_SIZE + ih_item_len(p_s_ih)); + init_tb_struct(th, &s_ins_balance, th->t_super, path, + IH_SIZE + ih_item_len(ih)); #ifdef DISPLACE_NEW_PACKING_LOCALITIES s_ins_balance.key = key->on_disk_key; #endif @@ -2037,12 +2045,12 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath } while ((retval = - fix_nodes(M_INSERT, &s_ins_balance, p_s_ih, - p_c_body)) == REPEAT_SEARCH) { + fix_nodes(M_INSERT, &s_ins_balance, ih, + body)) == REPEAT_SEARCH) { search_again: /* file system changed while we were in the fix_nodes */ PROC_INFO_INC(th->t_super, insert_item_restarted); - retval = search_item(th->t_super, key, p_s_path); + retval = search_item(th->t_super, key, path); if (retval == IO_ERROR) { retval = -EIO; goto error_out; @@ -2058,7 +2066,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath /* make balancing after all resources will be collected at a time */ if (retval == CARRY_ON) { - do_balance(&s_ins_balance, p_s_ih, p_c_body, M_INSERT); + do_balance(&s_ins_balance, ih, body, M_INSERT); return 0; } @@ -2069,7 +2077,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath #ifdef REISERQUOTA_DEBUG reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE, "reiserquota insert_item(): freeing %u id=%u type=%c", - quota_bytes, inode->i_uid, head2type(p_s_ih)); + quota_bytes, inode->i_uid, head2type(ih)); #endif if (inode) DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes); diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index 5c5ee0d0d6a8..2b90c0e5697c 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -172,10 +172,12 @@ void reiserfs_unmap_buffer(struct buffer_head *bh) inode */ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *inode, struct page *page, - struct treepath *p_s_path, /* path to the indirect item. 
*/ - const struct cpu_key *p_s_item_key, /* Key to look for unformatted node pointer to be cut. */ + struct treepath *path, /* path to the indirect item. */ + const struct cpu_key *item_key, /* Key to look for + * unformatted node + * pointer to be cut. */ loff_t n_new_file_size, /* New file size. */ - char *p_c_mode) + char *mode) { struct super_block *sb = inode->i_sb; struct item_head s_ih; @@ -189,10 +191,10 @@ int indirect2direct(struct reiserfs_transaction_handle *th, REISERFS_SB(sb)->s_indirect2direct++; - *p_c_mode = M_SKIP_BALANCING; + *mode = M_SKIP_BALANCING; /* store item head path points to. */ - copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); + copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); tail_len = (n_new_file_size & (n_block_size - 1)); if (get_inode_sd_version(inode) == STAT_DATA_V2) @@ -211,14 +213,14 @@ int indirect2direct(struct reiserfs_transaction_handle *th, tail = (char *)kmap(page); /* this can schedule */ - if (path_changed(&s_ih, p_s_path)) { + if (path_changed(&s_ih, path)) { /* re-search indirect item */ - if (search_for_position_by_key(sb, p_s_item_key, p_s_path) + if (search_for_position_by_key(sb, item_key, path) == POSITION_NOT_FOUND) reiserfs_panic(sb, "PAP-5520", "item to be converted %K does not exist", - p_s_item_key); - copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path)); + item_key); + copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); #ifdef CONFIG_REISERFS_CHECK pos = le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE - @@ -240,13 +242,13 @@ int indirect2direct(struct reiserfs_transaction_handle *th, */ tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); - PATH_LAST_POSITION(p_s_path)++; + PATH_LAST_POSITION(path)++; - key = *p_s_item_key; + key = *item_key; set_cpu_key_k_type(&key, TYPE_DIRECT); key.key_length = 4; /* Insert tail as new direct item in the tree */ - if (reiserfs_insert_item(th, p_s_path, &key, &s_ih, inode, + if (reiserfs_insert_item(th, path, &key, &s_ih, inode, tail ? tail : NULL) < 0) { /* No disk memory. So we can not convert last unformatted node to the direct item. In this case we used to adjust @@ -268,7 +270,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, /* We have inserted new direct item and must remove last unformatted node. */ - *p_c_mode = M_CUT; + *mode = M_CUT; /* we store position of first direct item in the in-core inode */ /* mark_file_with_tail (inode, pos1 + 1); */ diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index b72dc2095478..e711c796e9d1 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -694,9 +694,9 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key) #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key))) #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key))) -#define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \ - ( ! COMP_SHORT_KEYS(p_s_ih, p_s_key) && \ - I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) ) +#define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \ + (!COMP_SHORT_KEYS(ih, key) && \ + I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize)) /* maximal length of item */ #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE) @@ -1196,33 +1196,33 @@ struct treepath { struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} /* Get path element by path and path position. 
*/ -#define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset)) +#define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset)) /* Get buffer header at the path by path and path position. */ -#define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer) +#define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer) /* Get position in the element at the path by path and path position. */ -#define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position) +#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position) -#define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length)) +#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length)) /* you know, to the person who didn't write this the macro name does not at first suggest what it does. Maybe POSITION_FROM_PATH_END? Or maybe we should just focus on dumping paths... -Hans */ -#define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length)) +#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length)) -#define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path)) +#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path)) /* in do_balance leaf has h == 0 in contrast with path structure, where root has level == 0. That is why we need these defines */ -#define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */ +#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */ #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */ #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h)) #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */ -#define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h)) +#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h)) #define get_last_bh(path) PATH_PLAST_BUFFER(path) #define get_ih(path) PATH_PITEM_HEAD(path) @@ -1512,7 +1512,7 @@ extern struct item_operations *item_ops[TYPE_ANY + 1]; #define COMP_SHORT_KEYS comp_short_keys /* number of blocks pointed to by the indirect item */ -#define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE ) +#define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE) /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */ #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? 
(size) - ih_free_space(ih) : (size)) @@ -1793,8 +1793,8 @@ int reiserfs_convert_objectid_map_v1(struct super_block *); /* stree.c */ int B_IS_IN_TREE(const struct buffer_head *); -extern void copy_item_head(struct item_head *p_v_to, - const struct item_head *p_v_from); +extern void copy_item_head(struct item_head *to, + const struct item_head *from); // first key is in cpu form, second - le extern int comp_short_keys(const struct reiserfs_key *le_key, @@ -1829,20 +1829,20 @@ static inline void copy_key(struct reiserfs_key *to, memcpy(to, from, KEY_SIZE); } -int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path); -const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, +int comp_items(const struct item_head *stored_ih, const struct treepath *path); +const struct reiserfs_key *get_rkey(const struct treepath *chk_path, const struct super_block *sb); int search_by_key(struct super_block *, const struct cpu_key *, struct treepath *, int); #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL) int search_for_position_by_key(struct super_block *sb, - const struct cpu_key *p_s_cpu_key, - struct treepath *p_s_search_path); + const struct cpu_key *cpu_key, + struct treepath *search_path); extern void decrement_bcount(struct buffer_head *bh); -void decrement_counters_in_path(struct treepath *p_s_search_path); -void pathrelse(struct treepath *p_s_search_path); +void decrement_counters_in_path(struct treepath *search_path); +void pathrelse(struct treepath *search_path); int reiserfs_check_path(struct treepath *p); -void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path); +void pathrelse_and_restore(struct super_block *s, struct treepath *search_path); int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath *path, @@ -1865,7 +1865,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath *path, const struct cpu_key *key, - struct inode *inode, struct buffer_head *p_s_un_bh); + struct inode *inode, struct buffer_head *un_bh); void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, struct inode *inode, struct reiserfs_key *key); @@ -2005,7 +2005,7 @@ extern const struct address_space_operations reiserfs_address_space_operations; /* fix_nodes.c */ int fix_nodes(int n_op_mode, struct tree_balance *tb, - struct item_head *p_s_ins_ih, const void *); + struct item_head *ins_ih, const void *); void unfix_nodes(struct tree_balance *); /* prints.c */ -- cgit v1.2.3-59-g8ed1b From ee93961be1faddf9e9a638bc519145c20f0cfeba Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 30 Mar 2009 14:02:50 -0400 Subject: reiserfs: rename [cn]_* variables This patch renames n_, c_, etc variables to something more sane. This is the sixth in a series of patches to rip out some of the awful variable naming in reiserfs. 
Signed-off-by: Jeff Mahoney Signed-off-by: Linus Torvalds --- fs/reiserfs/file.c | 6 +- fs/reiserfs/fix_node.c | 474 +++++++++++++++++++++--------------------- fs/reiserfs/stree.c | 370 ++++++++++++++++----------------- fs/reiserfs/tail_conversion.c | 30 +-- 4 files changed, 438 insertions(+), 442 deletions(-) (limited to 'fs/reiserfs/fix_node.c') diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index cde16429ff00..9f436668b7f8 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -138,11 +138,11 @@ static int reiserfs_sync_file(struct file *filp, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; - int n_err; + int err; int barrier_done; BUG_ON(!S_ISREG(inode->i_mode)); - n_err = sync_mapping_buffers(inode->i_mapping); + err = sync_mapping_buffers(inode->i_mapping); reiserfs_write_lock(inode->i_sb); barrier_done = reiserfs_commit_for_inode(inode); reiserfs_write_unlock(inode->i_sb); @@ -150,7 +150,7 @@ static int reiserfs_sync_file(struct file *filp, blkdev_issue_flush(inode->i_sb->s_bdev, NULL); if (barrier_done < 0) return barrier_done; - return (n_err < 0) ? -EIO : 0; + return (err < 0) ? -EIO : 0; } /* taken fs/buffer.c:__block_commit_write */ diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index d97a55574ba9..5e5a4e6fbaf8 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -751,24 +751,24 @@ else \ static void free_buffers_in_tb(struct tree_balance *tb) { - int n_counter; + int i; pathrelse(tb->tb_path); - for (n_counter = 0; n_counter < MAX_HEIGHT; n_counter++) { - brelse(tb->L[n_counter]); - brelse(tb->R[n_counter]); - brelse(tb->FL[n_counter]); - brelse(tb->FR[n_counter]); - brelse(tb->CFL[n_counter]); - brelse(tb->CFR[n_counter]); - - tb->L[n_counter] = NULL; - tb->R[n_counter] = NULL; - tb->FL[n_counter] = NULL; - tb->FR[n_counter] = NULL; - tb->CFL[n_counter] = NULL; - tb->CFR[n_counter] = NULL; + for (i = 0; i < MAX_HEIGHT; i++) { + brelse(tb->L[i]); + brelse(tb->R[i]); + brelse(tb->FL[i]); + brelse(tb->FR[i]); + brelse(tb->CFL[i]); + brelse(tb->CFR[i]); + + tb->L[i] = NULL; + tb->R[i] = NULL; + tb->FL[i] = NULL; + tb->FR[i] = NULL; + tb->CFL[i] = NULL; + tb->CFR[i] = NULL; } } @@ -778,13 +778,13 @@ static void free_buffers_in_tb(struct tree_balance *tb) * NO_DISK_SPACE - no disk space. */ /* The function is NOT SCHEDULE-SAFE! */ -static int get_empty_nodes(struct tree_balance *tb, int n_h) +static int get_empty_nodes(struct tree_balance *tb, int h) { struct buffer_head *new_bh, - *Sh = PATH_H_PBUFFER(tb->tb_path, n_h); - b_blocknr_t *blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; - int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */ - n_retval = CARRY_ON; + *Sh = PATH_H_PBUFFER(tb->tb_path, h); + b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; + int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */ + retval = CARRY_ON; struct super_block *sb = tb->tb_sb; /* number_of_freeblk is the number of empty blocks which have been @@ -793,7 +793,7 @@ static int get_empty_nodes(struct tree_balance *tb, int n_h) number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs after empty blocks are acquired, and the balancing analysis is then restarted, amount_needed is the number needed by this level - (n_h) of the balancing analysis. + (h) of the balancing analysis. 
Note that for systems with many processes writing, it would be more layout optimal to calculate the total number needed by all @@ -801,31 +801,31 @@ static int get_empty_nodes(struct tree_balance *tb, int n_h) /* Initiate number_of_freeblk to the amount acquired prior to the restart of the analysis or 0 if not restarted, then subtract the amount needed - by all of the levels of the tree below n_h. */ - /* blknum includes S[n_h], so we subtract 1 in this calculation */ - for (n_counter = 0, n_number_of_freeblk = tb->cur_blknum; - n_counter < n_h; n_counter++) - n_number_of_freeblk -= - (tb->blknum[n_counter]) ? (tb->blknum[n_counter] - + by all of the levels of the tree below h. */ + /* blknum includes S[h], so we subtract 1 in this calculation */ + for (counter = 0, number_of_freeblk = tb->cur_blknum; + counter < h; counter++) + number_of_freeblk -= + (tb->blknum[counter]) ? (tb->blknum[counter] - 1) : 0; /* Allocate missing empty blocks. */ /* if Sh == 0 then we are getting a new root */ - n_amount_needed = (Sh) ? (tb->blknum[n_h] - 1) : 1; + amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1; /* Amount_needed = the amount that we need more than the amount that we have. */ - if (n_amount_needed > n_number_of_freeblk) - n_amount_needed -= n_number_of_freeblk; + if (amount_needed > number_of_freeblk) + amount_needed -= number_of_freeblk; else /* If we have enough already then there is nothing to do. */ return CARRY_ON; /* No need to check quota - is not allocated for blocks used for formatted nodes */ - if (reiserfs_new_form_blocknrs(tb, a_n_blocknrs, - n_amount_needed) == NO_DISK_SPACE) + if (reiserfs_new_form_blocknrs(tb, blocknrs, + amount_needed) == NO_DISK_SPACE) return NO_DISK_SPACE; /* for each blocknumber we just got, get a buffer and stick it on FEB */ - for (blocknr = a_n_blocknrs, n_counter = 0; - n_counter < n_amount_needed; blocknr++, n_counter++) { + for (blocknr = blocknrs, counter = 0; + counter < amount_needed; blocknr++, counter++) { RFALSE(!*blocknr, "PAP-8135: reiserfs_new_blocknrs failed when got new blocks"); @@ -845,10 +845,10 @@ static int get_empty_nodes(struct tree_balance *tb, int n_h) tb->FEB[tb->cur_blknum++] = new_bh; } - if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb)) - n_retval = REPEAT_SEARCH; + if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb)) + retval = REPEAT_SEARCH; - return n_retval; + return retval; } /* Get free space of the left neighbor, which is stored in the parent @@ -896,36 +896,36 @@ static int get_rfree(struct tree_balance *tb, int h) } /* Check whether left neighbor is in memory. */ -static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h) +static int is_left_neighbor_in_cache(struct tree_balance *tb, int h) { struct buffer_head *father, *left; struct super_block *sb = tb->tb_sb; - b_blocknr_t n_left_neighbor_blocknr; - int n_left_neighbor_position; + b_blocknr_t left_neighbor_blocknr; + int left_neighbor_position; /* Father of the left neighbor does not exist. */ - if (!tb->FL[n_h]) + if (!tb->FL[h]) return 0; /* Calculate father of the node to be balanced. */ - father = PATH_H_PBUFFER(tb->tb_path, n_h + 1); + father = PATH_H_PBUFFER(tb->tb_path, h + 1); RFALSE(!father || !B_IS_IN_TREE(father) || - !B_IS_IN_TREE(tb->FL[n_h]) || + !B_IS_IN_TREE(tb->FL[h]) || !buffer_uptodate(father) || - !buffer_uptodate(tb->FL[n_h]), + !buffer_uptodate(tb->FL[h]), "vs-8165: F[h] (%b) or FL[h] (%b) is invalid", - father, tb->FL[n_h]); + father, tb->FL[h]); /* Get position of the pointer to the left neighbor into the left father. 
*/ - n_left_neighbor_position = (father == tb->FL[n_h]) ? - tb->lkey[n_h] : B_NR_ITEMS(tb->FL[n_h]); + left_neighbor_position = (father == tb->FL[h]) ? + tb->lkey[h] : B_NR_ITEMS(tb->FL[h]); /* Get left neighbor block number. */ - n_left_neighbor_blocknr = - B_N_CHILD_NUM(tb->FL[n_h], n_left_neighbor_position); + left_neighbor_blocknr = + B_N_CHILD_NUM(tb->FL[h], left_neighbor_position); /* Look for the left neighbor in the cache. */ - if ((left = sb_find_get_block(sb, n_left_neighbor_blocknr))) { + if ((left = sb_find_get_block(sb, left_neighbor_blocknr))) { RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left), "vs-8170: left neighbor (%b %z) is not in the tree", @@ -955,7 +955,7 @@ static void decrement_key(struct cpu_key *key) * CARRY_ON - schedule didn't occur while the function worked; */ static int get_far_parent(struct tree_balance *tb, - int n_h, + int h, struct buffer_head **pfather, struct buffer_head **pcom_father, char c_lr_par) { @@ -963,38 +963,38 @@ static int get_far_parent(struct tree_balance *tb, INITIALIZE_PATH(s_path_to_neighbor_father); struct treepath *path = tb->tb_path; struct cpu_key s_lr_father_key; - int n_counter, - n_position = INT_MAX, - n_first_last_position = 0, - n_path_offset = PATH_H_PATH_OFFSET(path, n_h); + int counter, + position = INT_MAX, + first_last_position = 0, + path_offset = PATH_H_PATH_OFFSET(path, h); - /* Starting from F[n_h] go upwards in the tree, and look for the common - ancestor of F[n_h], and its neighbor l/r, that should be obtained. */ + /* Starting from F[h] go upwards in the tree, and look for the common + ancestor of F[h], and its neighbor l/r, that should be obtained. */ - n_counter = n_path_offset; + counter = path_offset; - RFALSE(n_counter < FIRST_PATH_ELEMENT_OFFSET, + RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET, "PAP-8180: invalid path length"); - for (; n_counter > FIRST_PATH_ELEMENT_OFFSET; n_counter--) { + for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) { /* Check whether parent of the current buffer in the path is really parent in the tree. */ if (!B_IS_IN_TREE - (parent = PATH_OFFSET_PBUFFER(path, n_counter - 1))) + (parent = PATH_OFFSET_PBUFFER(path, counter - 1))) return REPEAT_SEARCH; /* Check whether position in the parent is correct. */ - if ((n_position = + if ((position = PATH_OFFSET_POSITION(path, - n_counter - 1)) > + counter - 1)) > B_NR_ITEMS(parent)) return REPEAT_SEARCH; /* Check whether parent at the path really points to the child. */ - if (B_N_CHILD_NUM(parent, n_position) != - PATH_OFFSET_PBUFFER(path, n_counter)->b_blocknr) + if (B_N_CHILD_NUM(parent, position) != + PATH_OFFSET_PBUFFER(path, counter)->b_blocknr) return REPEAT_SEARCH; /* Return delimiting key if position in the parent is not equal to first/last one. */ if (c_lr_par == RIGHT_PARENTS) - n_first_last_position = B_NR_ITEMS(parent); - if (n_position != n_first_last_position) { + first_last_position = B_NR_ITEMS(parent); + if (position != first_last_position) { *pcom_father = parent; get_bh(*pcom_father); /*(*pcom_father = parent)->b_count++; */ @@ -1003,7 +1003,7 @@ static int get_far_parent(struct tree_balance *tb, } /* if we are in the root of the tree, then there is no common father */ - if (n_counter == FIRST_PATH_ELEMENT_OFFSET) { + if (counter == FIRST_PATH_ELEMENT_OFFSET) { /* Check whether first buffer in the path is the root of the tree. 
*/ if (PATH_OFFSET_PBUFFER (tb->tb_path, @@ -1036,18 +1036,18 @@ static int get_far_parent(struct tree_balance *tb, le_key2cpu_key(&s_lr_father_key, B_N_PDELIM_KEY(*pcom_father, (c_lr_par == - LEFT_PARENTS) ? (tb->lkey[n_h - 1] = - n_position - - 1) : (tb->rkey[n_h - + LEFT_PARENTS) ? (tb->lkey[h - 1] = + position - + 1) : (tb->rkey[h - 1] = - n_position))); + position))); if (c_lr_par == LEFT_PARENTS) decrement_key(&s_lr_father_key); if (search_by_key (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, - n_h + 1) == IO_ERROR) + h + 1) == IO_ERROR) // path is released return IO_ERROR; @@ -1059,7 +1059,7 @@ static int get_far_parent(struct tree_balance *tb, *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father); - RFALSE(B_LEVEL(*pfather) != n_h + 1, + RFALSE(B_LEVEL(*pfather) != h + 1, "PAP-8190: (%b %z) level too small", *pfather, *pfather); RFALSE(s_path_to_neighbor_father.path_length < FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small"); @@ -1069,92 +1069,92 @@ static int get_far_parent(struct tree_balance *tb, return CARRY_ON; } -/* Get parents of neighbors of node in the path(S[n_path_offset]) and common parents of - * S[n_path_offset] and L[n_path_offset]/R[n_path_offset]: F[n_path_offset], FL[n_path_offset], - * FR[n_path_offset], CFL[n_path_offset], CFR[n_path_offset]. - * Calculate numbers of left and right delimiting keys position: lkey[n_path_offset], rkey[n_path_offset]. +/* Get parents of neighbors of node in the path(S[path_offset]) and common parents of + * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset], + * FR[path_offset], CFL[path_offset], CFR[path_offset]. + * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset]. * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; * CARRY_ON - schedule didn't occur while the function worked; */ -static int get_parents(struct tree_balance *tb, int n_h) +static int get_parents(struct tree_balance *tb, int h) { struct treepath *path = tb->tb_path; - int n_position, - n_ret_value, - n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h); + int position, + ret, + path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); struct buffer_head *curf, *curcf; /* Current node is the root of the tree or will be root of the tree */ - if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) { + if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) { /* The root can not have parents. Release nodes which previously were obtained as parents of the current node neighbors. */ - brelse(tb->FL[n_h]); - brelse(tb->CFL[n_h]); - brelse(tb->FR[n_h]); - brelse(tb->CFR[n_h]); - tb->FL[n_h] = NULL; - tb->CFL[n_h] = NULL; - tb->FR[n_h] = NULL; - tb->CFR[n_h] = NULL; + brelse(tb->FL[h]); + brelse(tb->CFL[h]); + brelse(tb->FR[h]); + brelse(tb->CFR[h]); + tb->FL[h] = NULL; + tb->CFL[h] = NULL; + tb->FR[h] = NULL; + tb->CFR[h] = NULL; return CARRY_ON; } - /* Get parent FL[n_path_offset] of L[n_path_offset]. */ - n_position = PATH_OFFSET_POSITION(path, n_path_offset - 1); - if (n_position) { + /* Get parent FL[path_offset] of L[path_offset]. */ + position = PATH_OFFSET_POSITION(path, path_offset - 1); + if (position) { /* Current node is not the first child of its parent. 
*/ - curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); - curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); + curf = PATH_OFFSET_PBUFFER(path, path_offset - 1); + curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1); get_bh(curf); get_bh(curf); - tb->lkey[n_h] = n_position - 1; + tb->lkey[h] = position - 1; } else { - /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node. - Calculate current common parent of L[n_path_offset] and the current node. Note that - CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset]. - Calculate lkey[n_path_offset]. */ - if ((n_ret_value = get_far_parent(tb, n_h + 1, &curf, + /* Calculate current parent of L[path_offset], which is the left neighbor of the current node. + Calculate current common parent of L[path_offset] and the current node. Note that + CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset]. + Calculate lkey[path_offset]. */ + if ((ret = get_far_parent(tb, h + 1, &curf, &curcf, LEFT_PARENTS)) != CARRY_ON) - return n_ret_value; + return ret; } - brelse(tb->FL[n_h]); - tb->FL[n_h] = curf; /* New initialization of FL[n_h]. */ - brelse(tb->CFL[n_h]); - tb->CFL[n_h] = curcf; /* New initialization of CFL[n_h]. */ + brelse(tb->FL[h]); + tb->FL[h] = curf; /* New initialization of FL[h]. */ + brelse(tb->CFL[h]); + tb->CFL[h] = curcf; /* New initialization of CFL[h]. */ RFALSE((curf && !B_IS_IN_TREE(curf)) || (curcf && !B_IS_IN_TREE(curcf)), "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf); -/* Get parent FR[n_h] of R[n_h]. */ +/* Get parent FR[h] of R[h]. */ -/* Current node is the last child of F[n_h]. FR[n_h] != F[n_h]. */ - if (n_position == B_NR_ITEMS(PATH_H_PBUFFER(path, n_h + 1))) { -/* Calculate current parent of R[n_h], which is the right neighbor of F[n_h]. - Calculate current common parent of R[n_h] and current node. Note that CFR[n_h] - not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */ - if ((n_ret_value = - get_far_parent(tb, n_h + 1, &curf, &curcf, +/* Current node is the last child of F[h]. FR[h] != F[h]. */ + if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) { +/* Calculate current parent of R[h], which is the right neighbor of F[h]. + Calculate current common parent of R[h] and current node. Note that CFR[h] + not equal FR[path_offset] and CFR[h] not equal F[h]. */ + if ((ret = + get_far_parent(tb, h + 1, &curf, &curcf, RIGHT_PARENTS)) != CARRY_ON) - return n_ret_value; + return ret; } else { -/* Current node is not the last child of its parent F[n_h]. */ - curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); - curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1); +/* Current node is not the last child of its parent F[h]. */ + curf = PATH_OFFSET_PBUFFER(path, path_offset - 1); + curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1); get_bh(curf); get_bh(curf); - tb->rkey[n_h] = n_position; + tb->rkey[h] = position; } - brelse(tb->FR[n_h]); - /* New initialization of FR[n_path_offset]. */ - tb->FR[n_h] = curf; + brelse(tb->FR[h]); + /* New initialization of FR[path_offset]. */ + tb->FR[h] = curf; - brelse(tb->CFR[n_h]); - /* New initialization of CFR[n_path_offset]. */ - tb->CFR[n_h] = curcf; + brelse(tb->CFR[h]); + /* New initialization of CFR[path_offset]. */ + tb->CFR[h] = curcf; RFALSE((curf && !B_IS_IN_TREE(curf)) || (curcf && !B_IS_IN_TREE(curcf)), @@ -1222,7 +1222,7 @@ static int ip_check_balance(struct tree_balance *tb, int h) contains node being balanced. 
The mnemonic is that the attempted change in node space used level is levbytes bytes. */ - n_ret_value; + ret; int lfree, sfree, rfree /* free space in L, S and R */ ; @@ -1262,22 +1262,22 @@ static int ip_check_balance(struct tree_balance *tb, int h) if (!h) reiserfs_panic(tb->tb_sb, "vs-8210", "S[0] can not be 0"); - switch (n_ret_value = get_empty_nodes(tb, h)) { + switch (ret = get_empty_nodes(tb, h)) { case CARRY_ON: set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */ case NO_DISK_SPACE: case REPEAT_SEARCH: - return n_ret_value; + return ret; default: reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect " "return value of get_empty_nodes"); } } - if ((n_ret_value = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */ - return n_ret_value; + if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */ + return ret; sfree = B_FREE_SPACE(Sh); @@ -1564,7 +1564,7 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) /* Sh is the node whose balance is currently being checked, and Fh is its father. */ struct buffer_head *Sh, *Fh; - int maxsize, n_ret_value; + int maxsize, ret; int lfree, rfree /* free space in L and R */ ; Sh = PATH_H_PBUFFER(tb->tb_path, h); @@ -1589,8 +1589,8 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) return CARRY_ON; } - if ((n_ret_value = get_parents(tb, h)) != CARRY_ON) - return n_ret_value; + if ((ret = get_parents(tb, h)) != CARRY_ON) + return ret; /* get free space of neighbors */ rfree = get_rfree(tb, h); @@ -1747,7 +1747,7 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) attempted change in node space used level is levbytes bytes. */ int levbytes; /* the maximal item size */ - int maxsize, n_ret_value; + int maxsize, ret; /* S0 is the node whose balance is currently being checked, and F0 is its father. */ struct buffer_head *S0, *F0; @@ -1769,8 +1769,8 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) return NO_BALANCING_NEEDED; } - if ((n_ret_value = get_parents(tb, h)) != CARRY_ON) - return n_ret_value; + if ((ret = get_parents(tb, h)) != CARRY_ON) + return ret; /* get free space of neighbors */ rfree = get_rfree(tb, h); @@ -1889,40 +1889,40 @@ static int check_balance(int mode, } /* Check whether parent at the path is the really parent of the current node.*/ -static int get_direct_parent(struct tree_balance *tb, int n_h) +static int get_direct_parent(struct tree_balance *tb, int h) { struct buffer_head *bh; struct treepath *path = tb->tb_path; - int n_position, - n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h); + int position, + path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); /* We are in the root or in the new root. */ - if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) { + if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) { - RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET - 1, + RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1, "PAP-8260: invalid offset in the path"); if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) { /* Root is not changed. */ - PATH_OFFSET_PBUFFER(path, n_path_offset - 1) = NULL; - PATH_OFFSET_POSITION(path, n_path_offset - 1) = 0; + PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL; + PATH_OFFSET_POSITION(path, path_offset - 1) = 0; return CARRY_ON; } return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. 
*/ } if (!B_IS_IN_TREE - (bh = PATH_OFFSET_PBUFFER(path, n_path_offset - 1))) + (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1))) return REPEAT_SEARCH; /* Parent in the path is not in the tree. */ - if ((n_position = + if ((position = PATH_OFFSET_POSITION(path, - n_path_offset - 1)) > B_NR_ITEMS(bh)) + path_offset - 1)) > B_NR_ITEMS(bh)) return REPEAT_SEARCH; - if (B_N_CHILD_NUM(bh, n_position) != - PATH_OFFSET_PBUFFER(path, n_path_offset)->b_blocknr) + if (B_N_CHILD_NUM(bh, position) != + PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr) /* Parent in the path is not parent of the current node in the tree. */ return REPEAT_SEARCH; @@ -1935,92 +1935,92 @@ static int get_direct_parent(struct tree_balance *tb, int n_h) return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */ } -/* Using lnum[n_h] and rnum[n_h] we should determine what neighbors - * of S[n_h] we - * need in order to balance S[n_h], and get them if necessary. +/* Using lnum[h] and rnum[h] we should determine what neighbors + * of S[h] we + * need in order to balance S[h], and get them if necessary. * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; * CARRY_ON - schedule didn't occur while the function worked; */ -static int get_neighbors(struct tree_balance *tb, int n_h) +static int get_neighbors(struct tree_balance *tb, int h) { - int n_child_position, - n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h + 1); - unsigned long n_son_number; + int child_position, + path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1); + unsigned long son_number; struct super_block *sb = tb->tb_sb; struct buffer_head *bh; - PROC_INFO_INC(sb, get_neighbors[n_h]); + PROC_INFO_INC(sb, get_neighbors[h]); - if (tb->lnum[n_h]) { - /* We need left neighbor to balance S[n_h]. */ - PROC_INFO_INC(sb, need_l_neighbor[n_h]); - bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset); + if (tb->lnum[h]) { + /* We need left neighbor to balance S[h]. */ + PROC_INFO_INC(sb, need_l_neighbor[h]); + bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); - RFALSE(bh == tb->FL[n_h] && - !PATH_OFFSET_POSITION(tb->tb_path, n_path_offset), + RFALSE(bh == tb->FL[h] && + !PATH_OFFSET_POSITION(tb->tb_path, path_offset), "PAP-8270: invalid position in the parent"); - n_child_position = + child_position = (bh == - tb->FL[n_h]) ? tb->lkey[n_h] : B_NR_ITEMS(tb-> - FL[n_h]); - n_son_number = B_N_CHILD_NUM(tb->FL[n_h], n_child_position); - bh = sb_bread(sb, n_son_number); + tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb-> + FL[h]); + son_number = B_N_CHILD_NUM(tb->FL[h], child_position); + bh = sb_bread(sb, son_number); if (!bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(tb)) { brelse(bh); - PROC_INFO_INC(sb, get_neighbors_restart[n_h]); + PROC_INFO_INC(sb, get_neighbors_restart[h]); return REPEAT_SEARCH; } - RFALSE(!B_IS_IN_TREE(tb->FL[n_h]) || - n_child_position > B_NR_ITEMS(tb->FL[n_h]) || - B_N_CHILD_NUM(tb->FL[n_h], n_child_position) != + RFALSE(!B_IS_IN_TREE(tb->FL[h]) || + child_position > B_NR_ITEMS(tb->FL[h]) || + B_N_CHILD_NUM(tb->FL[h], child_position) != bh->b_blocknr, "PAP-8275: invalid parent"); RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child"); - RFALSE(!n_h && + RFALSE(!h && B_FREE_SPACE(bh) != MAX_CHILD_SIZE(bh) - - dc_size(B_N_CHILD(tb->FL[0], n_child_position)), + dc_size(B_N_CHILD(tb->FL[0], child_position)), "PAP-8290: invalid child size of left neighbor"); - brelse(tb->L[n_h]); - tb->L[n_h] = bh; + brelse(tb->L[h]); + tb->L[h] = bh; } - /* We need right neighbor to balance S[n_path_offset]. 
*/ - if (tb->rnum[n_h]) { - PROC_INFO_INC(sb, need_r_neighbor[n_h]); - bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset); + /* We need right neighbor to balance S[path_offset]. */ + if (tb->rnum[h]) { /* We need right neighbor to balance S[path_offset]. */ + PROC_INFO_INC(sb, need_r_neighbor[h]); + bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); - RFALSE(bh == tb->FR[n_h] && + RFALSE(bh == tb->FR[h] && PATH_OFFSET_POSITION(tb->tb_path, - n_path_offset) >= + path_offset) >= B_NR_ITEMS(bh), "PAP-8295: invalid position in the parent"); - n_child_position = - (bh == tb->FR[n_h]) ? tb->rkey[n_h] + 1 : 0; - n_son_number = B_N_CHILD_NUM(tb->FR[n_h], n_child_position); - bh = sb_bread(sb, n_son_number); + child_position = + (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0; + son_number = B_N_CHILD_NUM(tb->FR[h], child_position); + bh = sb_bread(sb, son_number); if (!bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(tb)) { brelse(bh); - PROC_INFO_INC(sb, get_neighbors_restart[n_h]); + PROC_INFO_INC(sb, get_neighbors_restart[h]); return REPEAT_SEARCH; } - brelse(tb->R[n_h]); - tb->R[n_h] = bh; + brelse(tb->R[h]); + tb->R[h] = bh; - RFALSE(!n_h + RFALSE(!h && B_FREE_SPACE(bh) != MAX_CHILD_SIZE(bh) - - dc_size(B_N_CHILD(tb->FR[0], n_child_position)), + dc_size(B_N_CHILD(tb->FR[0], child_position)), "PAP-8300: invalid child size of right neighbor (%d != %d - %d)", B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh), - dc_size(B_N_CHILD(tb->FR[0], n_child_position))); + dc_size(B_N_CHILD(tb->FR[0], child_position))); } return CARRY_ON; @@ -2317,11 +2317,11 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) * -1 - if no_disk_space */ -int fix_nodes(int n_op_mode, struct tree_balance *tb, +int fix_nodes(int op_mode, struct tree_balance *tb, struct item_head *ins_ih, const void *data) { - int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(tb->tb_path); - int n_pos_in_item; + int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path); + int pos_in_item; /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared ** during wait_tb_buffers_run @@ -2331,7 +2331,7 @@ int fix_nodes(int n_op_mode, struct tree_balance *tb, ++REISERFS_SB(tb->tb_sb)->s_fix_nodes; - n_pos_in_item = tb->tb_path->pos_in_item; + pos_in_item = tb->tb_path->pos_in_item; tb->fs_gen = get_generation(tb->tb_sb); @@ -2364,26 +2364,26 @@ int fix_nodes(int n_op_mode, struct tree_balance *tb, reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is " "not uptodate at the beginning of fix_nodes " "or not in tree (mode %c)", - tbS0, tbS0, n_op_mode); + tbS0, tbS0, op_mode); /* Check parameters. */ - switch (n_op_mode) { + switch (op_mode) { case M_INSERT: - if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(tbS0)) + if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0)) reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect " "item number %d (in S0 - %d) in case " - "of insert", n_item_num, + "of insert", item_num, B_NR_ITEMS(tbS0)); break; case M_PASTE: case M_DELETE: case M_CUT: - if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(tbS0)) { + if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) { print_block(tbS0, 0, -1, -1); reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect " "item number(%d); mode = %c " "insert_size = %d", - n_item_num, n_op_mode, + item_num, op_mode, tb->insert_size[0]); } break; @@ -2397,73 +2397,73 @@ int fix_nodes(int n_op_mode, struct tree_balance *tb, // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat return REPEAT_SEARCH; - /* Starting from the leaf level; for all levels n_h of the tree. 
*/ - for (n_h = 0; n_h < MAX_HEIGHT && tb->insert_size[n_h]; n_h++) { - n_ret_value = get_direct_parent(tb, n_h); - if (n_ret_value != CARRY_ON) + /* Starting from the leaf level; for all levels h of the tree. */ + for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) { + ret = get_direct_parent(tb, h); + if (ret != CARRY_ON) goto repeat; - n_ret_value = check_balance(n_op_mode, tb, n_h, n_item_num, - n_pos_in_item, ins_ih, data); - if (n_ret_value != CARRY_ON) { - if (n_ret_value == NO_BALANCING_NEEDED) { + ret = check_balance(op_mode, tb, h, item_num, + pos_in_item, ins_ih, data); + if (ret != CARRY_ON) { + if (ret == NO_BALANCING_NEEDED) { /* No balancing for higher levels needed. */ - n_ret_value = get_neighbors(tb, n_h); - if (n_ret_value != CARRY_ON) + ret = get_neighbors(tb, h); + if (ret != CARRY_ON) goto repeat; - if (n_h != MAX_HEIGHT - 1) - tb->insert_size[n_h + 1] = 0; + if (h != MAX_HEIGHT - 1) + tb->insert_size[h + 1] = 0; /* ok, analysis and resource gathering are complete */ break; } goto repeat; } - n_ret_value = get_neighbors(tb, n_h); - if (n_ret_value != CARRY_ON) + ret = get_neighbors(tb, h); + if (ret != CARRY_ON) goto repeat; /* No disk space, or schedule occurred and analysis may be * invalid and needs to be redone. */ - n_ret_value = get_empty_nodes(tb, n_h); - if (n_ret_value != CARRY_ON) + ret = get_empty_nodes(tb, h); + if (ret != CARRY_ON) goto repeat; - if (!PATH_H_PBUFFER(tb->tb_path, n_h)) { + if (!PATH_H_PBUFFER(tb->tb_path, h)) { /* We have a positive insert size but no nodes exist on this level, this means that we are creating a new root. */ - RFALSE(tb->blknum[n_h] != 1, + RFALSE(tb->blknum[h] != 1, "PAP-8350: creating new empty root"); - if (n_h < MAX_HEIGHT - 1) - tb->insert_size[n_h + 1] = 0; - } else if (!PATH_H_PBUFFER(tb->tb_path, n_h + 1)) { - if (tb->blknum[n_h] > 1) { - /* The tree needs to be grown, so this node S[n_h] + if (h < MAX_HEIGHT - 1) + tb->insert_size[h + 1] = 0; + } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) { + if (tb->blknum[h] > 1) { + /* The tree needs to be grown, so this node S[h] which is the root node is split into two nodes, - and a new node (S[n_h+1]) will be created to + and a new node (S[h+1]) will be created to become the root node. 
*/ - RFALSE(n_h == MAX_HEIGHT - 1, + RFALSE(h == MAX_HEIGHT - 1, "PAP-8355: attempt to create too high of a tree"); - tb->insert_size[n_h + 1] = + tb->insert_size[h + 1] = (DC_SIZE + - KEY_SIZE) * (tb->blknum[n_h] - 1) + + KEY_SIZE) * (tb->blknum[h] - 1) + DC_SIZE; - } else if (n_h < MAX_HEIGHT - 1) - tb->insert_size[n_h + 1] = 0; + } else if (h < MAX_HEIGHT - 1) + tb->insert_size[h + 1] = 0; } else - tb->insert_size[n_h + 1] = - (DC_SIZE + KEY_SIZE) * (tb->blknum[n_h] - 1); + tb->insert_size[h + 1] = + (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1); } - n_ret_value = wait_tb_buffers_until_unlocked(tb); - if (n_ret_value == CARRY_ON) { + ret = wait_tb_buffers_until_unlocked(tb); + if (ret == CARRY_ON) { if (FILESYSTEM_CHANGED_TB(tb)) { wait_tb_buffers_run = 1; - n_ret_value = REPEAT_SEARCH; + ret = REPEAT_SEARCH; goto repeat; } else { return CARRY_ON; @@ -2529,7 +2529,7 @@ int fix_nodes(int n_op_mode, struct tree_balance *tb, (tb->tb_sb, tb->FEB[i]); } } - return n_ret_value; + return ret; } } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index fd769c8dac32..e23303daa868 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -136,11 +136,11 @@ inline int comp_short_le_keys(const struct reiserfs_key *key1, const struct reiserfs_key *key2) { __u32 *k1_u32, *k2_u32; - int n_key_length = REISERFS_SHORT_KEY_LEN; + int key_length = REISERFS_SHORT_KEY_LEN; k1_u32 = (__u32 *) key1; k2_u32 = (__u32 *) key2; - for (; n_key_length--; ++k1_u32, ++k2_u32) { + for (; key_length--; ++k1_u32, ++k2_u32) { if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32)) return -1; if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32)) @@ -177,10 +177,10 @@ inline int comp_le_keys(const struct reiserfs_key *k1, * *pos = number of the searched element if found, else the * * number of the first element that is larger than key. * **************************************************************************/ -/* For those not familiar with binary search: n_lbound is the leftmost item that it - could be, n_rbound the rightmost item that it could be. We examine the item - halfway between n_lbound and n_rbound, and that tells us either that we can increase - n_lbound, or decrease n_rbound, or that we have found it, or if n_lbound <= n_rbound that +/* For those not familiar with binary search: lbound is the leftmost item that it + could be, rbound the rightmost item that it could be. We examine the item + halfway between lbound and rbound, and that tells us either that we can increase + lbound, or decrease rbound, or that we have found it, or if lbound <= rbound that there are no possible items, and we have not found it. With each examination we cut the number of possible items it could be by one more than half rounded down, or we find it. */ @@ -198,28 +198,27 @@ static inline int bin_search(const void *key, /* Key to search for. */ int *pos /* Number of the searched for element. */ ) { - int n_rbound, n_lbound, n_j; + int rbound, lbound, j; - for (n_j = ((n_rbound = num - 1) + (n_lbound = 0)) / 2; - n_lbound <= n_rbound; n_j = (n_rbound + n_lbound) / 2) + for (j = ((rbound = num - 1) + (lbound = 0)) / 2; + lbound <= rbound; j = (rbound + lbound) / 2) switch (comp_keys - ((struct reiserfs_key *)((char *)base + - n_j * width), + ((struct reiserfs_key *)((char *)base + j * width), (struct cpu_key *)key)) { case -1: - n_lbound = n_j + 1; + lbound = j + 1; continue; case 1: - n_rbound = n_j - 1; + rbound = j - 1; continue; case 0: - *pos = n_j; + *pos = j; return ITEM_FOUND; /* Key found in the array. 
*/ } /* bin_search did not find given key, it returns position of key, that is minimal and greater than the given one. */ - *pos = n_lbound; + *pos = lbound; return ITEM_NOT_FOUND; } @@ -242,43 +241,41 @@ static const struct reiserfs_key MAX_KEY = { of the path, and going upwards. We must check the path's validity at each step. If the key is not in the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this case we return a special key, either MIN_KEY or MAX_KEY. */ -static inline const struct reiserfs_key *get_lkey(const struct treepath - *chk_path, - const struct super_block - *sb) +static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path, + const struct super_block *sb) { - int n_position, n_path_offset = chk_path->path_length; + int position, path_offset = chk_path->path_length; struct buffer_head *parent; - RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET, + RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET, "PAP-5010: invalid offset in the path"); /* While not higher in path than first element. */ - while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { + while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { RFALSE(!buffer_uptodate - (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)), + (PATH_OFFSET_PBUFFER(chk_path, path_offset)), "PAP-5020: parent is not uptodate"); /* Parent at the path is not in the tree now. */ if (!B_IS_IN_TREE (parent = - PATH_OFFSET_PBUFFER(chk_path, n_path_offset))) + PATH_OFFSET_PBUFFER(chk_path, path_offset))) return &MAX_KEY; /* Check whether position in the parent is correct. */ - if ((n_position = + if ((position = PATH_OFFSET_POSITION(chk_path, - n_path_offset)) > + path_offset)) > B_NR_ITEMS(parent)) return &MAX_KEY; /* Check whether parent at the path really points to the child. */ - if (B_N_CHILD_NUM(parent, n_position) != + if (B_N_CHILD_NUM(parent, position) != PATH_OFFSET_PBUFFER(chk_path, - n_path_offset + 1)->b_blocknr) + path_offset + 1)->b_blocknr) return &MAX_KEY; /* Return delimiting key if position in the parent is not equal to zero. */ - if (n_position) - return B_N_PDELIM_KEY(parent, n_position - 1); + if (position) + return B_N_PDELIM_KEY(parent, position - 1); } /* Return MIN_KEY if we are in the root of the buffer tree. */ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)-> @@ -291,37 +288,37 @@ static inline const struct reiserfs_key *get_lkey(const struct treepath inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path, const struct super_block *sb) { - int n_position, n_path_offset = chk_path->path_length; + int position, path_offset = chk_path->path_length; struct buffer_head *parent; - RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET, + RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET, "PAP-5030: invalid offset in the path"); - while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { + while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { RFALSE(!buffer_uptodate - (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)), + (PATH_OFFSET_PBUFFER(chk_path, path_offset)), "PAP-5040: parent is not uptodate"); /* Parent at the path is not in the tree now. */ if (!B_IS_IN_TREE (parent = - PATH_OFFSET_PBUFFER(chk_path, n_path_offset))) + PATH_OFFSET_PBUFFER(chk_path, path_offset))) return &MIN_KEY; /* Check whether position in the parent is correct. */ - if ((n_position = + if ((position = PATH_OFFSET_POSITION(chk_path, - n_path_offset)) > + path_offset)) > B_NR_ITEMS(parent)) return &MIN_KEY; /* Check whether parent at the path really points to the child. 
*/ - if (B_N_CHILD_NUM(parent, n_position) != + if (B_N_CHILD_NUM(parent, position) != PATH_OFFSET_PBUFFER(chk_path, - n_path_offset + 1)->b_blocknr) + path_offset + 1)->b_blocknr) return &MIN_KEY; /* Return delimiting key if position in the parent is not the last one. */ - if (n_position != B_NR_ITEMS(parent)) - return B_N_PDELIM_KEY(parent, n_position); + if (position != B_NR_ITEMS(parent)) + return B_N_PDELIM_KEY(parent, position); } /* Return MAX_KEY if we are in the root of the buffer tree. */ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)-> @@ -371,14 +368,14 @@ int reiserfs_check_path(struct treepath *p) void pathrelse_and_restore(struct super_block *sb, struct treepath *search_path) { - int n_path_offset = search_path->path_length; + int path_offset = search_path->path_length; - RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, + RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, "clm-4000: invalid path offset"); - while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) { + while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) { struct buffer_head *bh; - bh = PATH_OFFSET_PBUFFER(search_path, n_path_offset--); + bh = PATH_OFFSET_PBUFFER(search_path, path_offset--); reiserfs_restore_prepared_buffer(sb, bh); brelse(bh); } @@ -388,13 +385,13 @@ void pathrelse_and_restore(struct super_block *sb, /* Drop the reference to each buffer in a path */ void pathrelse(struct treepath *search_path) { - int n_path_offset = search_path->path_length; + int path_offset = search_path->path_length; - RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, + RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, "PAP-5090: invalid path offset"); - while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) - brelse(PATH_OFFSET_PBUFFER(search_path, n_path_offset--)); + while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) + brelse(PATH_OFFSET_PBUFFER(search_path, path_offset--)); search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } @@ -572,16 +569,16 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s by the calling function. It is filled up by this function. */ - int n_stop_level /* How far down the tree to search. To + int stop_level /* How far down the tree to search. To stop at leaf level - set to DISK_LEAF_NODE_LEVEL */ ) { - b_blocknr_t n_block_number; + b_blocknr_t block_number; int expected_level; struct buffer_head *bh; struct path_element *last_element; - int n_node_level, n_retval; + int node_level, retval; int right_neighbor_of_leaf_node; int fs_gen; struct buffer_head *reada_bh[SEARCH_BY_KEY_READA]; @@ -589,7 +586,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s int reada_count = 0; #ifdef CONFIG_REISERFS_CHECK - int n_repeat_counter = 0; + int repeat_counter = 0; #endif PROC_INFO_INC(sb, search_by_key); @@ -605,16 +602,16 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s /* With each iteration of this loop we search through the items in the current node, and calculate the next current node(next path element) for the next iteration of this loop.. 
*/ - n_block_number = SB_ROOT_BLOCK(sb); + block_number = SB_ROOT_BLOCK(sb); expected_level = -1; while (1) { #ifdef CONFIG_REISERFS_CHECK - if (!(++n_repeat_counter % 50000)) + if (!(++repeat_counter % 50000)) reiserfs_warning(sb, "PAP-5100", "%s: there were %d iterations of " "while loop looking for key %K", - current->comm, n_repeat_counter, + current->comm, repeat_counter, key); #endif @@ -627,7 +624,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s /* Read the next tree node, and set the last element in the path to have a pointer to it. */ if ((bh = last_element->pe_buffer = - sb_getblk(sb, n_block_number))) { + sb_getblk(sb, block_number))) { if (!buffer_uptodate(bh) && reada_count > 1) search_by_key_reada(sb, reada_bh, reada_blocks, reada_count); @@ -661,7 +658,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s /* Get the root block number so that we can repeat the search starting from the root. */ - n_block_number = SB_ROOT_BLOCK(sb); + block_number = SB_ROOT_BLOCK(sb); expected_level = -1; right_neighbor_of_leaf_node = 0; @@ -694,26 +691,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s } /* ok, we have acquired next formatted node in the tree */ - n_node_level = B_LEVEL(bh); + node_level = B_LEVEL(bh); - PROC_INFO_BH_STAT(sb, bh, n_node_level - 1); + PROC_INFO_BH_STAT(sb, bh, node_level - 1); - RFALSE(n_node_level < n_stop_level, + RFALSE(node_level < stop_level, "vs-5152: tree level (%d) is less than stop level (%d)", - n_node_level, n_stop_level); + node_level, stop_level); - n_retval = bin_search(key, B_N_PITEM_HEAD(bh, 0), + retval = bin_search(key, B_N_PITEM_HEAD(bh, 0), B_NR_ITEMS(bh), - (n_node_level == + (node_level == DISK_LEAF_NODE_LEVEL) ? IH_SIZE : KEY_SIZE, &(last_element->pe_position)); - if (n_node_level == n_stop_level) { - return n_retval; + if (node_level == stop_level) { + return retval; } /* we are not in the stop level */ - if (n_retval == ITEM_FOUND) + if (retval == ITEM_FOUND) /* item has been found, so we choose the pointer which is to the right of the found one */ last_element->pe_position++; @@ -724,12 +721,12 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s /* So we have chosen a position in the current node which is an internal node. Now we calculate child block number by position in the node. 
*/ - n_block_number = + block_number = B_N_CHILD_NUM(bh, last_element->pe_position); /* if we are going to read leaf nodes, try for read ahead as well */ if ((search_path->reada & PATH_READA) && - n_node_level == DISK_LEAF_NODE_LEVEL + 1) { + node_level == DISK_LEAF_NODE_LEVEL + 1) { int pos = last_element->pe_position; int limit = B_NR_ITEMS(bh); struct reiserfs_key *le_key; @@ -781,7 +778,7 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b ) { struct item_head *p_le_ih; /* pointer to on-disk structure */ - int n_blk_size; + int blk_size; loff_t item_offset, offset; struct reiserfs_dir_entry de; int retval; @@ -816,7 +813,7 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b p_le_ih = B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path), --PATH_LAST_POSITION(search_path)); - n_blk_size = sb->s_blocksize; + blk_size = sb->s_blocksize; if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) { return FILE_NOT_FOUND; @@ -828,10 +825,10 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b /* Needed byte is contained in the item pointed to by the path. */ if (item_offset <= offset && - item_offset + op_bytes_number(p_le_ih, n_blk_size) > offset) { + item_offset + op_bytes_number(p_le_ih, blk_size) > offset) { pos_in_item(search_path) = offset - item_offset; if (is_indirect_le_ih(p_le_ih)) { - pos_in_item(search_path) /= n_blk_size; + pos_in_item(search_path) /= blk_size; } return POSITION_FOUND; } @@ -891,7 +888,7 @@ static inline int prepare_for_direct_item(struct treepath *path, if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) { // round_len = ROUND_UP(new_file_length); - /* this was n_new_file_length < le_ih ... */ + /* this was new_file_length < le_ih ... */ if (round_len < le_ih_k_offset(le_ih)) { *cut_size = -(IH_SIZE + ih_item_len(le_ih)); return M_DELETE; /* Delete this item. */ @@ -953,7 +950,7 @@ static inline int prepare_for_direntry_item(struct treepath *path, This function returns a determination of what balance mode the calling function should employ. */ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed, /* Number of unformatted nodes which were removed from end of the file. */ - int *cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */ + int *cut_size, unsigned long long new_file_length /* MAX_KEY_OFFSET in case of delete. */ ) { struct super_block *sb = inode->i_sb; @@ -965,7 +962,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st /* Stat_data item. */ if (is_statdata_le_ih(p_le_ih)) { - RFALSE(n_new_file_length != max_reiserfs_offset(inode), + RFALSE(new_file_length != max_reiserfs_offset(inode), "PAP-5210: mode must be M_DELETE"); *cut_size = -(IH_SIZE + ih_item_len(p_le_ih)); @@ -975,13 +972,13 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st /* Directory item. */ if (is_direntry_le_ih(p_le_ih)) return prepare_for_direntry_item(path, p_le_ih, inode, - n_new_file_length, + new_file_length, cut_size); /* Direct item. */ if (is_direct_le_ih(p_le_ih)) return prepare_for_direct_item(path, p_le_ih, inode, - n_new_file_length, cut_size); + new_file_length, cut_size); /* Case of an indirect item. 
*/ { @@ -992,10 +989,10 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st int result = M_CUT; int pos = 0; - if ( n_new_file_length == max_reiserfs_offset (inode) ) { + if ( new_file_length == max_reiserfs_offset (inode) ) { /* prepare_for_delete_or_cut() is called by * reiserfs_delete_item() */ - n_new_file_length = 0; + new_file_length = 0; delete = 1; } @@ -1006,7 +1003,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); pos = I_UNFM_NUM(&s_ih); - while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) { + while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) { __le32 *unfm; __u32 block; @@ -1062,35 +1059,34 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st } /* Calculate number of bytes which will be deleted or cut during balance */ -static int calc_deleted_bytes_number(struct tree_balance *tb, char c_mode) +static int calc_deleted_bytes_number(struct tree_balance *tb, char mode) { - int n_del_size; + int del_size; struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path); if (is_statdata_le_ih(p_le_ih)) return 0; - n_del_size = - (c_mode == + del_size = + (mode == M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0]; if (is_direntry_le_ih(p_le_ih)) { - // return EMPTY_DIR_SIZE; /* We delete emty directoris only. */ - // we can't use EMPTY_DIR_SIZE, as old format dirs have a different - // empty size. ick. FIXME, is this right? - // - return n_del_size; + /* return EMPTY_DIR_SIZE; We delete emty directoris only. + * we can't use EMPTY_DIR_SIZE, as old format dirs have a different + * empty size. ick. FIXME, is this right? */ + return del_size; } if (is_indirect_le_ih(p_le_ih)) - n_del_size = (n_del_size / UNFM_P_SIZE) * + del_size = (del_size / UNFM_P_SIZE) * (PATH_PLAST_BUFFER(tb->tb_path)->b_size); - return n_del_size; + return del_size; } static void init_tb_struct(struct reiserfs_transaction_handle *th, struct tree_balance *tb, struct super_block *sb, - struct treepath *path, int n_size) + struct treepath *path, int size) { BUG_ON(!th->t_trans_id); @@ -1101,7 +1097,7 @@ static void init_tb_struct(struct reiserfs_transaction_handle *th, tb->tb_path = path; PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL; PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0; - tb->insert_size[0] = n_size; + tb->insert_size[0] = size; } void padd_item(char *item, int total_length, int length) @@ -1156,11 +1152,11 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct item_head s_ih; struct item_head *q_ih; int quota_cut_bytes; - int n_ret_value, n_del_size, n_removed; + int ret_value, del_size, removed; #ifdef CONFIG_REISERFS_CHECK - char c_mode; - int n_iter = 0; + char mode; + int iter = 0; #endif BUG_ON(!th->t_trans_id); @@ -1169,34 +1165,34 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, 0 /*size is unknown */ ); while (1) { - n_removed = 0; + removed = 0; #ifdef CONFIG_REISERFS_CHECK - n_iter++; - c_mode = + iter++; + mode = #endif prepare_for_delete_or_cut(th, inode, path, - item_key, &n_removed, - &n_del_size, + item_key, &removed, + &del_size, max_reiserfs_offset(inode)); - RFALSE(c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE"); + RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE"); copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); - s_del_balance.insert_size[0] = n_del_size; + s_del_balance.insert_size[0] = del_size; - n_ret_value = 
fix_nodes(M_DELETE, &s_del_balance, NULL, NULL); - if (n_ret_value != REPEAT_SEARCH) + ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL); + if (ret_value != REPEAT_SEARCH) break; PROC_INFO_INC(sb, delete_item_restarted); // file system changed, repeat search - n_ret_value = + ret_value = search_for_position_by_key(sb, item_key, path); - if (n_ret_value == IO_ERROR) + if (ret_value == IO_ERROR) break; - if (n_ret_value == FILE_NOT_FOUND) { + if (ret_value == FILE_NOT_FOUND) { reiserfs_warning(sb, "vs-5340", "no items of the file %K found", item_key); @@ -1204,12 +1200,12 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, } } /* while (1) */ - if (n_ret_value != CARRY_ON) { + if (ret_value != CARRY_ON) { unfix_nodes(&s_del_balance); return 0; } // reiserfs_delete_item returns item length when success - n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE); + ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE); q_ih = get_ih(path); quota_cut_bytes = ih_item_len(q_ih); @@ -1255,7 +1251,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); memcpy(data + off, B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih), - n_ret_value); + ret_value); kunmap_atomic(data, KM_USER0); } /* Perform balancing after all resources have been collected at once. */ @@ -1269,7 +1265,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes); /* Return deleted body length */ - return n_ret_value; + return ret_value; } /* Summary Of Mechanisms For Handling Collisions Between Processes: @@ -1432,13 +1428,13 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, struct page *page, struct treepath *path, const struct cpu_key *item_key, - loff_t n_new_file_size, char *mode) + loff_t new_file_size, char *mode) { struct super_block *sb = inode->i_sb; - int n_block_size = sb->s_blocksize; + int block_size = sb->s_blocksize; int cut_bytes; BUG_ON(!th->t_trans_id); - BUG_ON(n_new_file_size != inode->i_size); + BUG_ON(new_file_size != inode->i_size); /* the page being sent in could be NULL if there was an i/o error ** reading in the last block. The user will hit problems trying to @@ -1450,15 +1446,15 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, /* leave tail in an unformatted node */ *mode = M_SKIP_BALANCING; cut_bytes = - n_block_size - (n_new_file_size & (n_block_size - 1)); + block_size - (new_file_size & (block_size - 1)); pathrelse(path); return cut_bytes; } /* Perform the conversion to a direct_item. */ /* return indirect_to_direct(inode, path, item_key, - n_new_file_size, mode); */ + new_file_size, mode); */ return indirect2direct(th, inode, page, path, item_key, - n_new_file_size, mode); + new_file_size, mode); } /* we did indirect_to_direct conversion. And we have inserted direct @@ -1512,7 +1508,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, struct treepath *path, struct cpu_key *item_key, struct inode *inode, - struct page *page, loff_t n_new_file_size) + struct page *page, loff_t new_file_size) { struct super_block *sb = inode->i_sb; /* Every function which is going to call do_balance must first @@ -1521,10 +1517,10 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, After that we can make tree balancing. */ struct tree_balance s_cut_balance; struct item_head *p_le_ih; - int n_cut_size = 0, /* Amount to be cut. 
*/ - n_ret_value = CARRY_ON, n_removed = 0, /* Number of the removed unformatted nodes. */ - n_is_inode_locked = 0; - char c_mode; /* Mode of the balance. */ + int cut_size = 0, /* Amount to be cut. */ + ret_value = CARRY_ON, removed = 0, /* Number of the removed unformatted nodes. */ + is_inode_locked = 0; + char mode; /* Mode of the balance. */ int retval2 = -1; int quota_cut_bytes; loff_t tail_pos = 0; @@ -1532,7 +1528,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); init_tb_struct(th, &s_cut_balance, inode->i_sb, path, - n_cut_size); + cut_size); /* Repeat this loop until we either cut the item without needing to balance, or we fix_nodes without schedule occurring */ @@ -1542,30 +1538,30 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, free unformatted nodes which are pointed to by the cut pointers. */ - c_mode = + mode = prepare_for_delete_or_cut(th, inode, path, - item_key, &n_removed, - &n_cut_size, n_new_file_size); - if (c_mode == M_CONVERT) { + item_key, &removed, + &cut_size, new_file_size); + if (mode == M_CONVERT) { /* convert last unformatted node to direct item or leave tail in the unformatted node */ - RFALSE(n_ret_value != CARRY_ON, + RFALSE(ret_value != CARRY_ON, "PAP-5570: can not convert twice"); - n_ret_value = + ret_value = maybe_indirect_to_direct(th, inode, page, path, item_key, - n_new_file_size, &c_mode); - if (c_mode == M_SKIP_BALANCING) + new_file_size, &mode); + if (mode == M_SKIP_BALANCING) /* tail has been left in the unformatted node */ - return n_ret_value; + return ret_value; - n_is_inode_locked = 1; + is_inode_locked = 1; /* removing of last unformatted node will change value we have to return to truncate. Save it */ - retval2 = n_ret_value; - /*retval2 = sb->s_blocksize - (n_new_file_size & (sb->s_blocksize - 1)); */ + retval2 = ret_value; + /*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */ /* So, we have performed the first part of the conversion: inserting the new direct item. Now we are removing the @@ -1573,10 +1569,10 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, it. */ set_cpu_key_k_type(item_key, TYPE_INDIRECT); item_key->key_length = 4; - n_new_file_size -= - (n_new_file_size & (sb->s_blocksize - 1)); - tail_pos = n_new_file_size; - set_cpu_key_k_offset(item_key, n_new_file_size + 1); + new_file_size -= + (new_file_size & (sb->s_blocksize - 1)); + tail_pos = new_file_size; + set_cpu_key_k_offset(item_key, new_file_size + 1); if (search_for_position_by_key (sb, item_key, path) == POSITION_NOT_FOUND) { @@ -1589,38 +1585,38 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, } continue; } - if (n_cut_size == 0) { + if (cut_size == 0) { pathrelse(path); return 0; } - s_cut_balance.insert_size[0] = n_cut_size; + s_cut_balance.insert_size[0] = cut_size; - n_ret_value = fix_nodes(c_mode, &s_cut_balance, NULL, NULL); - if (n_ret_value != REPEAT_SEARCH) + ret_value = fix_nodes(mode, &s_cut_balance, NULL, NULL); + if (ret_value != REPEAT_SEARCH) break; PROC_INFO_INC(sb, cut_from_item_restarted); - n_ret_value = + ret_value = search_for_position_by_key(sb, item_key, path); - if (n_ret_value == POSITION_FOUND) + if (ret_value == POSITION_FOUND) continue; reiserfs_warning(sb, "PAP-5610", "item %K not found", item_key); unfix_nodes(&s_cut_balance); - return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT; + return (ret_value == IO_ERROR) ? 
-EIO : -ENOENT; } /* while */ // check fix_nodes results (IO_ERROR or NO_DISK_SPACE) - if (n_ret_value != CARRY_ON) { - if (n_is_inode_locked) { + if (ret_value != CARRY_ON) { + if (is_inode_locked) { // FIXME: this seems to be not needed: we are always able // to cut item indirect_to_direct_roll_back(th, inode, path); } - if (n_ret_value == NO_DISK_SPACE) + if (ret_value == NO_DISK_SPACE) reiserfs_warning(sb, "reiserfs-5092", "NO_DISK_SPACE"); unfix_nodes(&s_cut_balance); @@ -1629,24 +1625,24 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, /* go ahead and perform balancing */ - RFALSE(c_mode == M_PASTE || c_mode == M_INSERT, "invalid mode"); + RFALSE(mode == M_PASTE || mode == M_INSERT, "invalid mode"); /* Calculate number of bytes that need to be cut from the item. */ quota_cut_bytes = - (c_mode == + (mode == M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance. insert_size[0]; if (retval2 == -1) - n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode); + ret_value = calc_deleted_bytes_number(&s_cut_balance, mode); else - n_ret_value = retval2; + ret_value = retval2; /* For direct items, we only change the quota when deleting the last ** item. */ p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path); if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) { - if (c_mode == M_DELETE && + if (mode == M_DELETE && (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) == 1) { // FIXME: this is to keep 3.5 happy @@ -1657,7 +1653,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, } } #ifdef CONFIG_REISERFS_CHECK - if (n_is_inode_locked) { + if (is_inode_locked) { struct item_head *le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path); /* we are going to complete indirect2direct conversion. Make @@ -1667,13 +1663,13 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, reiserfs_panic(sb, "vs-5652", "item must be indirect %h", le_ih); - if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE) + if (mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE) reiserfs_panic(sb, "vs-5653", "completing " "indirect2direct conversion indirect " "item %h being deleted must be of " "4 byte long", le_ih); - if (c_mode == M_CUT + if (mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) { reiserfs_panic(sb, "vs-5654", "can not complete " "indirect2direct conversion of %h " @@ -1685,8 +1681,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, } #endif - do_balance(&s_cut_balance, NULL, NULL, c_mode); - if (n_is_inode_locked) { + do_balance(&s_cut_balance, NULL, NULL, mode); + if (is_inode_locked) { /* we've done an indirect->direct conversion. when the data block ** was freed, it was removed from the list of blocks that must ** be flushed before the transaction commits, make sure to @@ -1701,7 +1697,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, quota_cut_bytes, inode->i_uid, '?'); #endif DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes); - return n_ret_value; + return ret_value; } static void truncate_directory(struct reiserfs_transaction_handle *th, @@ -1733,9 +1729,9 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, INITIALIZE_PATH(s_search_path); /* Path to the current object item. */ struct item_head *p_le_ih; /* Pointer to an item header. */ struct cpu_key s_item_key; /* Key to search for a previous file item. */ - loff_t n_file_size, /* Old file size. */ - n_new_file_size; /* New file size. */ - int n_deleted; /* Number of deleted or truncated bytes. 
*/ + loff_t file_size, /* Old file size. */ + new_file_size; /* New file size. */ + int deleted; /* Number of deleted or truncated bytes. */ int retval; int err = 0; @@ -1752,7 +1748,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, } /* Get new file size. */ - n_new_file_size = inode->i_size; + new_file_size = inode->i_size; // FIXME: note, that key type is unimportant here make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode), @@ -1782,7 +1778,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, /* Get real file size (total length of all file items) */ p_le_ih = PATH_PITEM_HEAD(&s_search_path); if (is_statdata_le_ih(p_le_ih)) - n_file_size = 0; + file_size = 0; else { loff_t offset = le_ih_k_offset(p_le_ih); int bytes = @@ -1791,42 +1787,42 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, /* this may mismatch with real file size: if last direct item had no padding zeros and last unformatted node had no free space, this file would have this file size */ - n_file_size = offset + bytes - 1; + file_size = offset + bytes - 1; } /* * are we doing a full truncate or delete, if so * kick in the reada code */ - if (n_new_file_size == 0) + if (new_file_size == 0) s_search_path.reada = PATH_READA | PATH_READA_BACK; - if (n_file_size == 0 || n_file_size < n_new_file_size) { + if (file_size == 0 || file_size < new_file_size) { goto update_and_out; } /* Update key to search for the last file item. */ - set_cpu_key_k_offset(&s_item_key, n_file_size); + set_cpu_key_k_offset(&s_item_key, file_size); do { /* Cut or delete file item. */ - n_deleted = + deleted = reiserfs_cut_from_item(th, &s_search_path, &s_item_key, - inode, page, n_new_file_size); - if (n_deleted < 0) { + inode, page, new_file_size); + if (deleted < 0) { reiserfs_warning(inode->i_sb, "vs-5665", "reiserfs_cut_from_item failed"); reiserfs_check_path(&s_search_path); return 0; } - RFALSE(n_deleted > n_file_size, + RFALSE(deleted > file_size, "PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K", - n_deleted, n_file_size, &s_item_key); + deleted, file_size, &s_item_key); /* Change key to search the last file item. */ - n_file_size -= n_deleted; + file_size -= deleted; - set_cpu_key_k_offset(&s_item_key, n_file_size); + set_cpu_key_k_offset(&s_item_key, file_size); /* While there are bytes to truncate and previous file item is presented in the tree. */ @@ -1857,13 +1853,13 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, goto out; reiserfs_update_inode_transaction(inode); } - } while (n_file_size > ROUND_UP(n_new_file_size) && + } while (file_size > ROUND_UP(new_file_size) && search_for_position_by_key(inode->i_sb, &s_item_key, &s_search_path) == POSITION_FOUND); - RFALSE(n_file_size > ROUND_UP(n_new_file_size), + RFALSE(file_size > ROUND_UP(new_file_size), "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d", - n_new_file_size, n_file_size, s_item_key.on_disk_key.k_objectid); + new_file_size, file_size, s_item_key.on_disk_key.k_objectid); update_and_out: if (update_timestamps) { @@ -1918,7 +1914,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree const struct cpu_key *key, /* Key to search for the needed item. */ struct inode *inode, /* Inode item belongs to */ const char *body, /* Pointer to the bytes to paste. */ - int n_pasted_size) + int pasted_size) { /* Size of pasted bytes. 
*/ struct tree_balance s_paste_balance; int retval; @@ -1931,16 +1927,16 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): allocating %u id=%u type=%c", - n_pasted_size, inode->i_uid, + pasted_size, inode->i_uid, key2type(&(key->on_disk_key))); #endif - if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) { + if (DQUOT_ALLOC_SPACE_NODIRTY(inode, pasted_size)) { pathrelse(search_path); return -EDQUOT; } init_tb_struct(th, &s_paste_balance, th->t_super, search_path, - n_pasted_size); + pasted_size); #ifdef DISPLACE_NEW_PACKING_LOCALITIES s_paste_balance.key = key->on_disk_key; #endif @@ -1988,10 +1984,10 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): freeing %u id=%u type=%c", - n_pasted_size, inode->i_uid, + pasted_size, inode->i_uid, key2type(&(key->on_disk_key))); #endif - DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size); + DQUOT_FREE_SPACE_NODIRTY(inode, pasted_size); return retval; } diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index 2b90c0e5697c..d7f6e51bef2a 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -26,7 +26,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, converted item. */ struct item_head ind_ih; /* new indirect item to be inserted or key of unfm pointer to be pasted */ - int n_blk_size, n_retval; /* returned value for reiserfs_insert_item and clones */ + int blk_size, retval; /* returned value for reiserfs_insert_item and clones */ unp_t unfm_ptr; /* Handle on an unformatted node that will be inserted in the tree. */ @@ -35,7 +35,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, REISERFS_SB(sb)->s_direct2indirect++; - n_blk_size = sb->s_blocksize; + blk_size = sb->s_blocksize; /* and key to search for append or insert pointer to the new unformatted node. */ @@ -64,17 +64,17 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, set_ih_free_space(&ind_ih, 0); /* delete at nearest future */ put_ih_item_len(&ind_ih, UNFM_P_SIZE); PATH_LAST_POSITION(path)++; - n_retval = + retval = reiserfs_insert_item(th, path, &end_key, &ind_ih, inode, (char *)&unfm_ptr); } else { /* Paste into last indirect item of an object. */ - n_retval = reiserfs_paste_into_item(th, path, &end_key, inode, + retval = reiserfs_paste_into_item(th, path, &end_key, inode, (char *)&unfm_ptr, UNFM_P_SIZE); } - if (n_retval) { - return n_retval; + if (retval) { + return retval; } // note: from here there are two keys which have matching first // three key components. They only differ by the fourth one. @@ -98,7 +98,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, RFALSE(!is_direct_le_ih(p_le_ih), "vs-14055: direct item expected(%K), found %h", &end_key, p_le_ih); - tail_size = (le_ih_k_offset(p_le_ih) & (n_blk_size - 1)) + tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1)) + ih_item_len(p_le_ih) - 1; /* we only send the unbh pointer if the buffer is not up to date. 
@@ -113,11 +113,11 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, } else { up_to_date_bh = unbh; } - n_retval = reiserfs_delete_item(th, path, &end_key, inode, + retval = reiserfs_delete_item(th, path, &end_key, inode, up_to_date_bh); - total_tail += n_retval; - if (tail_size == n_retval) + total_tail += retval; + if (tail_size == retval) // done: file does not have direct items anymore break; @@ -129,7 +129,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0); - memset(kaddr + pgoff, 0, n_blk_size - total_tail); + memset(kaddr + pgoff, 0, blk_size - total_tail); kunmap_atomic(kaddr, KM_USER0); } @@ -181,7 +181,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, { struct super_block *sb = inode->i_sb; struct item_head s_ih; - unsigned long n_block_size = sb->s_blocksize; + unsigned long block_size = sb->s_blocksize; char *tail; int tail_len, round_tail_len; loff_t pos, pos1; /* position of first byte of the tail */ @@ -196,7 +196,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, /* store item head path points to. */ copy_item_head(&s_ih, PATH_PITEM_HEAD(path)); - tail_len = (n_new_file_size & (n_block_size - 1)); + tail_len = (n_new_file_size & (block_size - 1)); if (get_inode_sd_version(inode) == STAT_DATA_V2) round_tail_len = ROUND_UP(tail_len); else @@ -257,7 +257,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, unformatted node. For now i_size is considered as guard for going out of file size */ kunmap(page); - return n_block_size - round_tail_len; + return block_size - round_tail_len; } kunmap(page); @@ -276,5 +276,5 @@ int indirect2direct(struct reiserfs_transaction_handle *th, /* mark_file_with_tail (inode, pos1 + 1); */ REISERFS_I(inode)->i_first_direct_byte = pos1 + 1; - return n_block_size - round_tail_len; + return block_size - round_tail_len; } -- cgit v1.2.3-59-g8ed1b