Commit 098297b2 authored by Jeff Mahoney, committed by Jan Kara

reiserfs: cleanup, reformat comments to normal kernel style

This patch reformats comments in the reiserfs code to fit in 80 columns and
to follow the style rules.

There is no functional change but it helps make my eyes bleed less.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Parent 4cf5f7ad
......@@ -50,8 +50,10 @@ static inline void get_bit_address(struct super_block *s,
unsigned int *bmap_nr,
unsigned int *offset)
{
/* It is in the bitmap block number equal to the block
* number divided by the number of bits in a block. */
/*
* It is in the bitmap block number equal to the block
* number divided by the number of bits in a block.
*/
*bmap_nr = block >> (s->s_blocksize_bits + 3);
/* Within that bitmap block it is located at bit offset *offset. */
*offset = block & ((s->s_blocksize << 3) - 1);
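A worked example of the arithmetic above, as a standalone user-space sketch; the 4KiB block size and the sample block number are illustrative assumptions, not values from the patch:

#include <stdio.h>

int main(void)
{
        /* assume 4KiB blocks: s_blocksize_bits == 12, so one bitmap
         * block covers 1 << (12 + 3) == 32768 blocks */
        unsigned int blocksize_bits = 12;
        unsigned long block = 100000;

        unsigned int bmap_nr = block >> (blocksize_bits + 3);
        unsigned int offset = block & ((1UL << (blocksize_bits + 3)) - 1);

        /* prints: bmap_nr=3 offset=1696 (3 * 32768 + 1696 == 100000) */
        printf("bmap_nr=%u offset=%u\n", bmap_nr, offset);
        return 0;
}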
......@@ -71,8 +73,10 @@ int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
get_bit_address(s, block, &bmap, &offset);
/* Old format filesystem? Unlikely, but the bitmaps are all up front so
* we need to account for it. */
/*
* Old format filesystem? Unlikely, but the bitmaps are all
* up front so we need to account for it.
*/
if (unlikely(test_bit(REISERFS_OLD_FORMAT,
&(REISERFS_SB(s)->s_properties)))) {
b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
......@@ -108,8 +112,11 @@ int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
return 1;
}
/* searches in journal structures for a given block number (bmap, off). If block
is found in reiserfs journal it suggests next free block candidate to test. */
/*
* Searches in journal structures for a given block number (bmap, off).
* If block is found in reiserfs journal it suggests next free block
* candidate to test.
*/
static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
int off, int *next)
{
......@@ -120,7 +127,7 @@ static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
*next = tmp;
PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
} else {
(*next) = off + 1; /* inc offset to avoid looping. */
(*next) = off + 1; /* inc offset to avoid looping. */
PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
}
PROC_INFO_INC(s, scan_bitmap.retry);
......@@ -129,8 +136,10 @@ static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
return 0;
}
/* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
* block; */
/*
* Searches for a window of zero bits with given minimum and maximum
* lengths in one bitmap block
*/
static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
unsigned int bmap_n, int *beg, int boundary,
int min, int max, int unfm)
......@@ -146,10 +155,6 @@ static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
"range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
PROC_INFO_INC(s, scan_bitmap.bmap);
/* this is unclear and lacks comments, explain how journal bitmaps
work here for the reader. Convey a sense of the design here. What
is a window? */
/* - I mean `a window of zero bits' as in description of this function - Zam. */
if (!bi) {
reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
......@@ -165,15 +170,18 @@ static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
cont:
if (bi->free_count < min) {
brelse(bh);
return 0; // No free blocks in this bitmap
return 0; /* No free blocks in this bitmap */
}
/* search for a first zero bit -- beginning of a window */
*beg = reiserfs_find_next_zero_le_bit
((unsigned long *)(bh->b_data), boundary, *beg);
if (*beg + min > boundary) { /* search for a zero bit fails or the rest of bitmap block
* cannot contain a zero window of minimum size */
/*
* search for a zero bit fails or the rest of bitmap block
* cannot contain a zero window of minimum size
*/
if (*beg + min > boundary) {
brelse(bh);
return 0;
}
......@@ -187,37 +195,63 @@ static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
next = end;
break;
}
/* finding the other end of zero bit window requires looking into journal structures (in
* case of searching for free blocks for unformatted nodes) */
/*
* finding the other end of zero bit window requires
* looking into journal structures (in case of
* searching for free blocks for unformatted nodes)
*/
if (unfm && is_block_in_journal(s, bmap_n, end, &next))
break;
}
/* now (*beg) points to beginning of zero bits window,
* (end) points to one bit after the window end */
if (end - *beg >= min) { /* it seems we have found window of proper size */
/*
* now (*beg) points to beginning of zero bits window,
* (end) points to one bit after the window end
*/
/* found window of proper size */
if (end - *beg >= min) {
int i;
reiserfs_prepare_for_journal(s, bh, 1);
/* try to set all blocks used checking are they still free */
/*
* try to set all blocks as used, checking
* whether they are still free
*/
for (i = *beg; i < end; i++) {
/* It seems that we should not check in journal again. */
/* Don't check in journal again. */
if (reiserfs_test_and_set_le_bit
(i, bh->b_data)) {
/* bit was set by another process
* while we slept in prepare_for_journal() */
/*
* bit was set by another process while
* we slept in prepare_for_journal()
*/
PROC_INFO_INC(s, scan_bitmap.stolen);
if (i >= *beg + min) { /* we can continue with smaller set of allocated blocks,
* if length of this set is more or equal to `min' */
/*
* we can continue with a smaller set
* of allocated blocks, if the length of
* this set is greater than or equal to `min'
*/
if (i >= *beg + min) {
end = i;
break;
}
/* otherwise we clear all bit were set ... */
/*
* otherwise we clear all bits that
* were set ...
*/
while (--i >= *beg)
reiserfs_clear_le_bit
(i, bh->b_data);
reiserfs_restore_prepared_buffer(s, bh);
*beg = org;
/* ... and search again in current block from beginning */
/*
* Search again in current block
* from beginning
*/
goto cont;
}
}
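To make the loop above easier to follow, here is a simplified user-space sketch of the same zero-bit-window scan; it assumes a plain in-memory bitmap and deliberately omits the journal checks, buffer locking, and retry logic of the real function:

/* find the first run of at least min clear bits in [beg, boundary);
 * returns the start of the window, or -1 if none fits */
static int find_zero_window(const unsigned char *bitmap, int beg,
                            int boundary, int min)
{
        int start = beg, end;

        while (start + min <= boundary) {
                /* first zero bit -- beginning of a window */
                while (start < boundary &&
                       (bitmap[start >> 3] & (1 << (start & 7))))
                        start++;
                /* rest of the bitmap cannot hold a window */
                if (start + min > boundary)
                        return -1;

                /* walk to the first set bit -- one past the window end */
                for (end = start; end < boundary; end++)
                        if (bitmap[end >> 3] & (1 << (end & 7)))
                                break;

                if (end - start >= min)
                        return start;   /* window of proper size */
                start = end + 1;        /* too small, keep scanning */
        }
        return -1;
}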
......@@ -268,11 +302,13 @@ static inline int block_group_used(struct super_block *s, u32 id)
int bm = bmap_hash_id(s, id);
struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];
/* If we don't have cached information on this bitmap block, we're
/*
* If we don't have cached information on this bitmap block, we're
* going to have to load it later anyway. Loading it here allows us
* to make a better decision. This favors long-term performance gain
* with a better on-disk layout vs. a short term gain of skipping the
* read and potentially having a bad placement. */
* read and potentially having a bad placement.
*/
if (info->free_count == UINT_MAX) {
struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
brelse(bh);
......@@ -305,17 +341,16 @@ __le32 reiserfs_choose_packing(struct inode * dir)
return packing;
}
/* Tries to find contiguous zero bit window (given size) in given region of
* bitmap and place new blocks there. Returns number of allocated blocks. */
/*
* Tries to find contiguous zero bit window (given size) in given region of
* bitmap and place new blocks there. Returns number of allocated blocks.
*/
static int scan_bitmap(struct reiserfs_transaction_handle *th,
b_blocknr_t * start, b_blocknr_t finish,
int min, int max, int unfm, sector_t file_block)
{
int nr_allocated = 0;
struct super_block *s = th->t_super;
/* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
* - Hans, it is not a block number - Zam. */
unsigned int bm, off;
unsigned int end_bm, end_off;
unsigned int off_max = s->s_blocksize << 3;
......@@ -323,8 +358,10 @@ static int scan_bitmap(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
PROC_INFO_INC(s, scan_bitmap.call);
/* No point in looking for more free blocks */
if (SB_FREE_BLOCKS(s) <= 0)
return 0; // No point in looking for more free blocks
return 0;
get_bit_address(s, *start, &bm, &off);
get_bit_address(s, finish, &end_bm, &end_off);
......@@ -333,7 +370,8 @@ static int scan_bitmap(struct reiserfs_transaction_handle *th,
if (end_bm > reiserfs_bmap_count(s))
end_bm = reiserfs_bmap_count(s);
/* When the bitmap is more than 10% free, anyone can allocate.
/*
* When the bitmap is more than 10% free, anyone can allocate.
* When it's less than 10% free, only files that already use the
* bitmap are allowed. Once we pass 80% full, this restriction
* is lifted.
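A hedged sketch of the admission policy this comment describes; the function name, parameters, and threshold arithmetic below are illustrative assumptions, not the kernel's actual code:

/* illustrative only: mirrors the stated 10%/80% policy */
static int may_allocate_here(unsigned int free_bits,
                             unsigned int bits_per_bitmap,
                             unsigned int fs_used_percent,
                             int file_already_uses_bitmap)
{
        if (fs_used_percent >= 80)              /* restriction lifted */
                return 1;
        if (free_bits * 10 > bits_per_bitmap)   /* more than 10% free */
                return 1;
        return file_already_uses_bitmap;        /* existing users only */
}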
......@@ -532,7 +570,8 @@ int reiserfs_parse_alloc_options(struct super_block *s, char *options)
{
char *this_char, *value;
REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */
/* clear default settings */
REISERFS_SB(s)->s_alloc_options.bits = 0;
while ((this_char = strsep(&options, ":")) != NULL) {
if ((value = strchr(this_char, '=')) != NULL)
......@@ -733,7 +772,7 @@ static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
hash_in = (char *)&hint->key.k_dir_id;
} else {
if (!hint->inode) {
//hint->search_start = hint->beg;
/*hint->search_start = hint->beg;*/
hash_in = (char *)&hint->key.k_dir_id;
} else
if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
......@@ -786,7 +825,8 @@ static void oid_groups(reiserfs_blocknr_hint_t * hint)
dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
/* keep the root dir and it's first set of subdirs close to
/*
* keep the root dir and its first set of subdirs close to
* the start of the disk
*/
if (dirid <= 2)
......@@ -800,7 +840,8 @@ static void oid_groups(reiserfs_blocknr_hint_t * hint)
}
}
/* returns 1 if it finds an indirect item and gets valid hint info
/*
* returns 1 if it finds an indirect item and gets valid hint info
* from it, otherwise 0
*/
static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
......@@ -812,8 +853,11 @@ static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
__le32 *item;
int ret = 0;
if (!hint->path) /* reiserfs code can call this function w/o pointer to path
* structure supplied; then we rely on supplied search_start */
/*
* reiserfs code can call this function w/o pointer to path
* structure supplied; then we rely on supplied search_start
*/
if (!hint->path)
return 0;
path = hint->path;
......@@ -825,12 +869,13 @@ static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
hint->search_start = bh->b_blocknr;
/*
* for indirect item: go to left and look for the first non-hole entry
* in the indirect item
*/
if (!hint->formatted_node && is_indirect_le_ih(ih)) {
/* for indirect item: go to left and look for the first non-hole entry
in the indirect item */
if (pos_in_item == I_UNFM_NUM(ih))
pos_in_item--;
// pos_in_item = I_UNFM_NUM (ih) - 1;
while (pos_in_item >= 0) {
int t = get_block_num(item, pos_in_item);
if (t) {
......@@ -846,10 +891,12 @@ static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
return ret;
}
/* should be, if formatted node, then try to put on first part of the device
specified as number of percent with mount option device, else try to put
on last of device. This is not to say it is good code to do so,
but the effect should be measured. */
/*
* Should be: if this is a formatted node, try to put it on the first
* part of the device (specified as a percentage via a mount option);
* else try to put it on the last part of the device. This is not to
* say it is good code to do so, but the effect should be measured.
*/
static inline void set_border_in_hint(struct super_block *s,
reiserfs_blocknr_hint_t * hint)
{
......@@ -975,21 +1022,27 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
set_border_in_hint(s, hint);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
/* whenever we create a new directory, we displace it. At first we will
hash for location, later we might look for a moderately empty place for
it */
/*
* whenever we create a new directory, we displace it. At first
* we will hash for location, later we might look for a moderately
* empty place for it
*/
if (displacing_new_packing_localities(s)
&& hint->th->displace_new_blocks) {
displace_new_packing_locality(hint);
/* we do not continue determine_search_start,
* if new packing locality is being displaced */
/*
* we do not continue determine_search_start,
* if new packing locality is being displaced
*/
return;
}
#endif
/* all persons should feel encouraged to add more special cases here and
* test them */
/*
* all persons should feel encouraged to add more special cases
* here and test them
*/
if (displacing_large_files(s) && !hint->formatted_node
&& this_blocknr_allocation_would_make_it_a_large_file(hint)) {
......@@ -997,8 +1050,10 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
return;
}
/* if none of our special cases is relevant, use the left neighbor in the
tree order of the new node we are allocating for */
/*
* if none of our special cases is relevant, use the left
* neighbor in the tree order of the new node we are allocating for
*/
if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
hash_formatted_node(hint);
return;
......@@ -1006,10 +1061,13 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
unfm_hint = get_left_neighbor(hint);
/* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
new blocks are displaced based on directory ID. Also, if suggested search_start
is less than last preallocated block, we start searching from it, assuming that
HDD dataflow is faster in forward direction */
/*
* Mimic old block allocator behaviour, that is if VFS allowed for
* preallocation, new blocks are displaced based on directory ID.
* Also, if suggested search_start is less than last preallocated
* block, we start searching from it, assuming that HDD dataflow
* is faster in forward direction
*/
if (TEST_OPTION(old_way, s)) {
if (!hint->formatted_node) {
if (!reiserfs_hashed_relocation(s))
......@@ -1038,11 +1096,13 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
TEST_OPTION(old_hashed_relocation, s)) {
old_hashed_relocation(hint);
}
/* new_hashed_relocation works with both formatted/unformatted nodes */
if ((!unfm_hint || hint->formatted_node) &&
TEST_OPTION(new_hashed_relocation, s)) {
new_hashed_relocation(hint);
}
/* dirid grouping works only on unformatted nodes */
if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
dirid_groups(hint);
......@@ -1080,8 +1140,6 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
return CARRY_ON;
}
/* XXX I know it could be merged with upper-level function;
but may be result function would be too complex. */
static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
b_blocknr_t * new_blocknrs,
b_blocknr_t start,
......@@ -1109,7 +1167,10 @@ static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
/* do we have something to fill prealloc. array also ? */
if (nr_allocated > 0) {
/* it means prealloc_size was greater that 0 and we do preallocation */
/*
* it means prealloc_size was greater than 0 and
* we do preallocation
*/
list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
&SB_JOURNAL(hint->th->t_super)->
j_prealloc_list);
......@@ -1177,7 +1238,8 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
start = 0;
finish = hint->beg;
break;
default: /* We've tried searching everywhere, not enough space */
default:
/* We've tried searching everywhere, not enough space */
/* Free the blocks */
if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
......@@ -1262,8 +1324,11 @@ static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
return amount_needed;
}
int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us /* Amount of blocks we have
already reserved */ )
int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
b_blocknr_t *new_blocknrs,
int amount_needed,
/* Amount of blocks we have already reserved */
int reserved_by_us)
{
int initial_amount_needed = amount_needed;
int ret;
......@@ -1275,15 +1340,21 @@ int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new
return NO_DISK_SPACE;
/* should this be if !hint->inode && hint->preallocate? */
/* do you mean hint->formatted_node can be removed ? - Zam */
/* hint->formatted_node cannot be removed because we try to access
inode information here, and there is often no inode assotiated with
metadata allocations - green */
/*
* hint->formatted_node cannot be removed because we try to access
* inode information here, and there is often no inode associated with
* metadata allocations - green
*/
if (!hint->formatted_node && hint->preallocate) {
amount_needed = use_preallocated_list_if_available
(hint, new_blocknrs, amount_needed);
if (amount_needed == 0) /* all blocknrs we need we got from
prealloc. list */
/*
* We have all the block numbers we need from the
* prealloc list
*/
if (amount_needed == 0)
return CARRY_ON;
new_blocknrs += (initial_amount_needed - amount_needed);
}
......@@ -1297,10 +1368,12 @@ int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new
ret = blocknrs_and_prealloc_arrays_from_search_start
(hint, new_blocknrs, amount_needed);
/* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
* need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
* variant) */
/*
* We used prealloc. list to fill (partially) new_blocknrs array.
* If final allocation fails we need to return blocks back to
* prealloc. list or just free them. -- Zam (I chose second
* variant)
*/
if (ret != CARRY_ON) {
while (amount_needed++ < initial_amount_needed) {
reiserfs_free_block(hint->th, hint->inode,
......@@ -1339,8 +1412,10 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
struct buffer_head *bh;
/* Way old format filesystems had the bitmaps packed up front.
* I doubt there are any of these left, but just in case... */
/*
* Way old format filesystems had the bitmaps packed up front.
* I doubt there are any of these left, but just in case...
*/
if (unlikely(test_bit(REISERFS_OLD_FORMAT,
&(REISERFS_SB(sb)->s_properties))))
block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
......
......@@ -59,7 +59,10 @@ static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *d
int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
{
struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
/* key of current position in the directory (key of directory entry) */
struct cpu_key pos_key;
INITIALIZE_PATH(path_to_entry);
struct buffer_head *bh;
int item_num, entry_num;
......@@ -77,21 +80,28 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
reiserfs_check_lock_depth(inode->i_sb, "readdir");
/* form key for search the next directory entry using f_pos field of
file structure */
/*
* form the key to search for the next directory entry,
* using the f_pos field of the file structure
*/
make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
next_pos = cpu_key_k_offset(&pos_key);
path_to_entry.reada = PATH_READA;
while (1) {
research:
/* search the directory item, containing entry with specified key */
/*
* search for the directory item containing the entry
* with the specified key
*/
search_res =
search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry,
&de);
if (search_res == IO_ERROR) {
// FIXME: we could just skip part of directory which could
// not be read
/*
* FIXME: we could just skip part of directory
* which could not be read
*/
ret = -EIO;
goto out;
}
......@@ -109,14 +119,20 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
"vs-9005 item_num == %d, item amount == %d",
item_num, B_NR_ITEMS(bh));
/* and entry must be not more than number of entries in the item */
/*
* and the entry number must not exceed the number
* of entries in the item
*/
RFALSE(ih_entry_count(ih) < entry_num,
"vs-9010: entry number is too big %d (%d)",
entry_num, ih_entry_count(ih));
/*
* go through all entries in the directory item beginning
* from the entry, that has been found
*/
if (search_res == POSITION_FOUND
|| entry_num < ih_entry_count(ih)) {
/* go through all entries in the directory item beginning from the entry, that has been found */
struct reiserfs_de_head *deh =
B_I_DEH(bh, ih) + entry_num;
......@@ -127,16 +143,18 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
ino_t d_ino;
loff_t cur_pos = deh_offset(deh);
/* it is hidden entry */
if (!de_visible(deh))
/* it is hidden entry */
continue;
d_reclen = entry_length(bh, ih, entry_num);
d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
if (d_reclen <= 0 ||
d_name + d_reclen > bh->b_data + bh->b_size) {
/* There is corrupted data in entry,
* We'd better stop here */
/*
* There is corrupted data in entry,
* We'd better stop here
*/
pathrelse(&path_to_entry);
ret = -EIO;
goto out;
......@@ -145,10 +163,10 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
if (!d_name[d_reclen - 1])
d_reclen = strlen(d_name);
/* too big to send back to VFS */
if (d_reclen >
REISERFS_MAX_NAME(inode->i_sb->
s_blocksize)) {
/* too big to send back to VFS */
continue;
}
......@@ -173,10 +191,14 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
goto research;
}
}
// Note, that we copy name to user space via temporary
// buffer (local_buf) because filldir will block if
// user space buffer is swapped out. At that time
// entry can move to somewhere else
/*
* Note, that we copy name to user space via
* temporary buffer (local_buf) because
* filldir will block if user space buffer is
* swapped out. At that time entry can move to
* somewhere else
*/
memcpy(local_buf, d_name, d_reclen);
/*
......@@ -209,22 +231,26 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
} /* for */
}
/* end of directory has been reached */
if (item_num != B_NR_ITEMS(bh) - 1)
// end of directory has been reached
goto end;
/* item we went through is last item of node. Using right
delimiting key check is it directory end */
/*
* the item we went through is the last item of the node.
* Use the right delimiting key to check whether this is
* the end of the directory
*/
rkey = get_rkey(&path_to_entry, inode->i_sb);
if (!comp_le_keys(rkey, &MIN_KEY)) {
/* set pos_key to key, that is the smallest and greater
that key of the last entry in the item */
/*
* set pos_key to the smallest key greater than the
* key of the last entry in the item
*/
set_cpu_key_k_offset(&pos_key, next_pos);
continue;
}
/* end of directory has been reached */
if (COMP_SHORT_KEYS(rkey, &pos_key)) {
// end of directory has been reached
goto end;
}
......@@ -248,9 +274,10 @@ static int reiserfs_readdir(struct file *file, struct dir_context *ctx)
return reiserfs_readdir_inode(file_inode(file), ctx);
}
/* compose directory item containing "." and ".." entries (entries are
not aligned to 4 byte boundary) */
/* the last four params are LE */
/*
* compose directory item containing "." and ".." entries (entries are
* not aligned to 4 byte boundary)
*/
void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
__le32 par_dirid, __le32 par_objid)
{
......
......@@ -2,18 +2,13 @@
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
/* Now we have all buffers that must be used in balancing of the tree */
/* Further calculations can not cause schedule(), and thus the buffer */
/* tree will be stable until the balancing will be finished */
/* balance the tree according to the analysis made before, */
/* and using buffers obtained after all above. */
/**
** balance_leaf_when_delete
** balance_leaf
** do_balance
**
**/
/*
* Now we have all buffers that must be used in balancing of the tree.
* Further calculations cannot cause schedule(), and thus the buffer
* tree will be stable until the balancing is finished.
* Balance the tree according to the analysis made before,
* using the buffers obtained as above.
*/
#include <asm/uaccess.h>
#include <linux/time.h>
......@@ -68,35 +63,39 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
/* summary:
if deleting something ( tb->insert_size[0] < 0 )
return(balance_leaf_when_delete()); (flag d handled here)
else
if lnum is larger than 0 we put items into the left node
if rnum is larger than 0 we put items into the right node
if snum1 is larger than 0 we put items into the new node s1
if snum2 is larger than 0 we put items into the new node s2
Note that all *num* count new items being created.
It would be easier to read balance_leaf() if each of these summary
lines was a separate procedure rather than being inlined. I think
that there are many passages here and in balance_leaf_when_delete() in
which two calls to one procedure can replace two passages, and it
might save cache space and improve software maintenance costs to do so.
Vladimir made the perceptive comment that we should offload most of
the decision making in this function into fix_nodes/check_balance, and
then create some sort of structure in tb that says what actions should
be performed by do_balance.
-Hans */
/* Balance leaf node in case of delete or cut: insert_size[0] < 0
/*
* summary:
* if deleting something ( tb->insert_size[0] < 0 )
* return(balance_leaf_when_delete()); (flag d handled here)
* else
* if lnum is larger than 0 we put items into the left node
* if rnum is larger than 0 we put items into the right node
* if snum1 is larger than 0 we put items into the new node s1
* if snum2 is larger than 0 we put items into the new node s2
* Note that all *num* count new items being created.
*
* It would be easier to read balance_leaf() if each of these summary
* lines was a separate procedure rather than being inlined. I think
* that there are many passages here and in balance_leaf_when_delete() in
* which two calls to one procedure can replace two passages, and it
* might save cache space and improve software maintenance costs to do so.
*
* Vladimir made the perceptive comment that we should offload most of
* the decision making in this function into fix_nodes/check_balance, and
* then create some sort of structure in tb that says what actions should
* be performed by do_balance.
*
* -Hans
*/
/*
* Balance leaf node in case of delete or cut: insert_size[0] < 0
*
* lnum, rnum can have values >= -1
* -1 means that the neighbor must be joined with S
* 0 means that nothing should be done with the neighbor
* >0 means to shift entirely or partly the specified number of items to the neighbor
* >0 means to shift entirely or partly the specified number of items
* to the neighbor
*/
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
......@@ -149,8 +148,16 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
case M_CUT:{ /* cut item in S[0] */
if (is_direntry_le_ih(ih)) {
/* UFS unlink semantics are such that you can only delete one directory entry at a time. */
/* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
/*
* UFS unlink semantics are such that you
* can only delete one directory entry at
* a time.
*/
/*
* when we cut a directory tb->insert_size[0]
* means number of entries to be cut (always 1)
*/
tb->insert_size[0] = -1;
leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
-tb->insert_size[0]);
......@@ -183,13 +190,22 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
"UNKNOWN"), flag);
}
/* the rule is that no shifting occurs unless by shifting a node can be freed */
/*
* the rule is that no shifting occurs unless by shifting
* a node can be freed
*/
n = B_NR_ITEMS(tbS0);
if (tb->lnum[0]) { /* L[0] takes part in balancing */
if (tb->lnum[0] == -1) { /* L[0] must be joined with S[0] */
if (tb->rnum[0] == -1) { /* R[0] must be also joined with S[0] */
/* L[0] takes part in balancing */
if (tb->lnum[0]) {
/* L[0] must be joined with S[0] */
if (tb->lnum[0] == -1) {
/* R[0] must be also joined with S[0] */
if (tb->rnum[0] == -1) {
if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
/* all contents of all the 3 buffers will be in L[0] */
/*
* all contents of all the 3 buffers
* will be in L[0]
*/
if (PATH_H_POSITION(tb->tb_path, 1) == 0
&& 1 < B_NR_ITEMS(tb->FR[0]))
replace_key(tb, tb->CFL[0],
......@@ -208,7 +224,10 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
return 0;
}
/* all contents of all the 3 buffers will be in R[0] */
/*
* all contents of all the 3 buffers will
* be in R[0]
*/
leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
NULL);
leaf_move_items(LEAF_FROM_L_TO_R, tb,
......@@ -233,7 +252,11 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
return 0;
}
/* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */
/*
* part of the contents of S[0] will be in L[0] and
* the rest of S[0] will be in R[0]
*/
RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
(tb->lnum[0] + tb->rnum[0] > n + 1),
......@@ -1178,9 +1201,7 @@ struct buffer_head *get_FEB(struct tree_balance *tb)
return tb->used[i];
}
/* This is now used because reiserfs_free_block has to be able to
** schedule.
*/
/* This is now used because reiserfs_free_block has to be able to schedule. */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
int i;
......@@ -1335,8 +1356,10 @@ static int check_before_balancing(struct tree_balance *tb)
"mount point.");
}
/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
prepped all of these for us). */
/*
* double check that buffers that we will modify are unlocked.
* (fix_nodes should already have prepped all of these for us).
*/
if (tb->lnum[0]) {
retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
......@@ -1429,49 +1452,51 @@ static void check_internal_levels(struct tree_balance *tb)
#endif
/* Now we have all of the buffers that must be used in balancing of
the tree. We rely on the assumption that schedule() will not occur
while do_balance works. ( Only interrupt handlers are acceptable.)
We balance the tree according to the analysis made before this,
using buffers already obtained. For SMP support it will someday be
necessary to add ordered locking of tb. */
/* Some interesting rules of balancing:
we delete a maximum of two nodes per level per balancing: we never
delete R, when we delete two of three nodes L, S, R then we move
them into R.
we only delete L if we are deleting two nodes, if we delete only
one node we delete S
if we shift leaves then we shift as much as we can: this is a
deliberate policy of extremism in node packing which results in
higher average utilization after repeated random balance operations
at the cost of more memory copies and more balancing as a result of
small insertions to full nodes.
if we shift internal nodes we try to evenly balance the node
utilization, with consequent less balancing at the cost of lower
utilization.
one could argue that the policy for directories in leaves should be
that of internal nodes, but we will wait until another day to
evaluate this.... It would be nice to someday measure and prove
these assumptions as to what is optimal....
/*
* Now we have all of the buffers that must be used in balancing of
* the tree. We rely on the assumption that schedule() will not occur
* while do_balance works. ( Only interrupt handlers are acceptable.)
* We balance the tree according to the analysis made before this,
* using buffers already obtained. For SMP support it will someday be
* necessary to add ordered locking of tb.
*/
*/
/*
* Some interesting rules of balancing:
* we delete a maximum of two nodes per level per balancing: we never
* delete R, when we delete two of three nodes L, S, R then we move
* them into R.
*
* we only delete L if we are deleting two nodes, if we delete only
* one node we delete S
*
* if we shift leaves then we shift as much as we can: this is a
* deliberate policy of extremism in node packing which results in
* higher average utilization after repeated random balance operations
* at the cost of more memory copies and more balancing as a result of
* small insertions to full nodes.
*
* if we shift internal nodes we try to evenly balance the node
* utilization, with consequent less balancing at the cost of lower
* utilization.
*
* one could argue that the policy for directories in leaves should be
* that of internal nodes, but we will wait until another day to
* evaluate this.... It would be nice to someday measure and prove
* these assumptions as to what is optimal....
*/
static inline void do_balance_starts(struct tree_balance *tb)
{
/* use print_cur_tb() to see initial state of struct
tree_balance */
/* use print_cur_tb() to see initial state of struct tree_balance */
/* store_print_tb (tb); */
/* do not delete, just comment it out */
/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
"check");*/
/*
print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
tb->tb_path->pos_in_item, tb, "check");
*/
RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
REISERFS_SB(tb->tb_sb)->cur_tb = tb;
......@@ -1487,9 +1512,10 @@ static inline void do_balance_completed(struct tree_balance *tb)
REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
#endif
/* reiserfs_free_block is no longer schedule safe. So, we need to
** put the buffers we want freed on the thrown list during do_balance,
** and then free them now
/*
* reiserfs_free_block is no longer schedule safe. So, we need to
* put the buffers we want freed on the thrown list during do_balance,
* and then free them now
*/
REISERFS_SB(tb->tb_sb)->s_do_balance++;
......@@ -1500,36 +1526,40 @@ static inline void do_balance_completed(struct tree_balance *tb)
free_thrown(tb);
}
void do_balance(struct tree_balance *tb, /* tree_balance structure */
struct item_head *ih, /* item header of inserted item */
const char *body, /* body of inserted item or bytes to paste */
int flag)
{ /* i - insert, d - delete
c - cut, p - paste
Cut means delete part of an item
(includes removing an entry from a
directory).
Delete means delete whole item.
Insert means add a new item into the
tree.
Paste means to append to the end of an
existing file or to insert a directory
entry. */
int child_pos, /* position of a child node in its parent */
h; /* level of the tree being processed */
struct item_head insert_key[2]; /* in our processing of one level
we sometimes determine what
must be inserted into the next
higher level. This insertion
consists of a key or two keys
and their corresponding
pointers */
struct buffer_head *insert_ptr[2]; /* inserted node-ptrs for the next
level */
/*
* do_balance - balance the tree
*
* @tb: tree_balance structure
* @ih: item header of inserted item
* @body: body of inserted item or bytes to paste
* @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' - paste
*
* Cut means delete part of an item (includes removing an entry from a
* directory).
*
* Delete means delete whole item.
*
* Insert means add a new item into the tree.
*
* Paste means to append to the end of an existing file or to
* insert a directory entry.
*/
void do_balance(struct tree_balance *tb, struct item_head *ih,
const char *body, int flag)
{
int child_pos; /* position of a child node in its parent */
int h; /* level of the tree being processed */
/*
* in our processing of one level we sometimes determine what
* must be inserted into the next higher level. This insertion
* consists of a key or two keys and their corresponding
* pointers
*/
struct item_head insert_key[2];
/* inserted node-ptrs for the next level */
struct buffer_head *insert_ptr[2];
tb->tb_mode = flag;
tb->need_balance_dirty = 0;
......@@ -1549,9 +1579,11 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
atomic_inc(&(fs_generation(tb->tb_sb)));
do_balance_starts(tb);
/* balance leaf returns 0 except if combining L R and S into
one node. see balance_internal() for explanation of this
line of code. */
/*
* balance_leaf returns 0 except if combining L R and S into
* one node. see balance_internal() for explanation of this
* line of code.
*/
child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
......
......@@ -15,20 +15,20 @@
#include <linux/quotaops.h>
/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed. If
** this is the last open file descriptor, and the file
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
* We pack the tails of files on file close, not at the time they are written.
* This implies an unnecessary copy of the tail and an unnecessary indirect item
* insertion/balancing, for files that are written in one write.
* It avoids unnecessary tail packings (balances) for files that are written in
* multiple writes and are small enough to have tails.
*
* file_release is called by the VFS layer when the file is closed. If
* this is the last open file descriptor, and the file is
* small enough to have a tail, and the tail is currently in an
* unformatted node, the tail is converted back into a direct item.
*
* We use reiserfs_truncate_file to pack the tail, since it already has
* all the conditions coded.
*/
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
......@@ -57,14 +57,16 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
}
reiserfs_write_lock(inode->i_sb);
/* freeing preallocation only involves relogging blocks that
/*
* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for
* us to log any additional blocks (including quota blocks)
*/
err = journal_begin(&th, inode->i_sb, 1);
if (err) {
/* uh oh, we can't allow the inode to go away while there
/*
* uh oh, we can't allow the inode to go away while there
* are still preallocated blocks pending. Try to join the
* aborted transaction
*/
......@@ -72,11 +74,13 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
err = journal_join_abort(&th, inode->i_sb, 1);
if (err) {
/* hmpf, our choices here aren't good. We can pin the inode
* which will disallow unmount from every happening, we can
* do nothing, which will corrupt random memory on unmount,
* or we can forcibly remove the file from the preallocation
* list, which will leak blocks on disk. Lets pin the inode
/*
* hmpf, our choices here aren't good. We can pin
* the inode which will disallow unmount from ever
* happening, we can do nothing, which will corrupt
* random memory on unmount, or we can forcibly
* remove the file from the preallocation list, which
* will leak blocks on disk. Let's pin the inode
* and let the admin know what is going on.
*/
igrab(inode);
......@@ -102,10 +106,12 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
tail_has_to_be_packed(inode)) {
/* if regular file is released by last holder and it has been
appended (we append by unformatted node only) or its direct
item(s) had to be converted, then it may have to be
indirect2direct converted */
/*
* if regular file is released by last holder and it has been
* appended (we append by unformatted node only) or its direct
* item(s) had to be converted, then it may have to be
* indirect2direct converted
*/
err = reiserfs_truncate_file(inode, 0);
}
out:
......@@ -117,8 +123,9 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
static int reiserfs_file_open(struct inode *inode, struct file *file)
{
int err = dquot_file_open(inode, file);
/* somebody might be tailpacking on final close; wait for it */
if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
/* somebody might be tailpacking on final close; wait for it */
mutex_lock(&(REISERFS_I(inode)->tailpack));
atomic_inc(&REISERFS_I(inode)->openers);
mutex_unlock(&(REISERFS_I(inode)->tailpack));
......@@ -208,7 +215,8 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
journal_mark_dirty(&th, s, bh);
} else if (!buffer_dirty(bh)) {
mark_buffer_dirty(bh);
/* do data=ordered on any page past the end
/*
* do data=ordered on any page past the end
* of file and any buffer marked BH_New.
*/
if (reiserfs_data_ordered(inode->i_sb) &&
......
This diff is collapsed.
......@@ -12,12 +12,6 @@
* Yura's function is added (04/07/2000)
*/
//
// keyed_hash
// yura_hash
// r5_hash
//
#include <linux/kernel.h>
#include "reiserfs.h"
#include <asm/types.h>
......@@ -56,7 +50,7 @@ u32 keyed_hash(const signed char *msg, int len)
u32 pad;
int i;
// assert(len >= 0 && len < 256);
/* assert(len >= 0 && len < 256); */
pad = (u32) len | ((u32) len << 8);
pad |= pad << 16;
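The two pad lines above broadcast the length byte into all four bytes of a 32-bit word. A minimal standalone check of that trick (the sample length value is an assumption):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t len = 0x5a;            /* any value < 256 */
        uint32_t pad = len | (len << 8);

        pad |= pad << 16;
        assert(pad == 0x5a5a5a5a);      /* len replicated in every byte */
        return 0;
}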
......@@ -127,9 +121,10 @@ u32 keyed_hash(const signed char *msg, int len)
return h0 ^ h1;
}
/* What follows in this file is copyright 2000 by Hans Reiser, and the
* licensing of what follows is governed by reiserfs/README */
/*
* What follows in this file is copyright 2000 by Hans Reiser, and the
* licensing of what follows is governed by reiserfs/README
*/
u32 yura_hash(const signed char *msg, int len)
{
int j, pow;
......
This diff is collapsed.
This diff is collapsed.
......@@ -15,7 +15,8 @@
* reiserfs_ioctl - handler for ioctl for inode
* supported commands:
* 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
* and prevent packing file (argument arg has to be non-zero)
* and prevent packing file (argument arg has to
* be non-zero)
* 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
* 3) That's all for a while ...
*/
......@@ -132,7 +133,10 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
/* These are just misnamed, they actually get/put from/to user an int */
/*
* These are just misnamed, they actually
* get/put from/to user an int
*/
switch (cmd) {
case REISERFS_IOC32_UNPACK:
cmd = REISERFS_IOC_UNPACK;
......@@ -160,10 +164,10 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
/*
** reiserfs_unpack
** Function try to convert tail from direct item into indirect.
** It set up nopack attribute in the REISERFS_I(inode)->nopack
*/
* reiserfs_unpack
* Function tries to convert the tail from a direct item into
* an indirect one. It sets up the nopack attribute in
* REISERFS_I(inode)->nopack
*/
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
......@@ -194,9 +198,10 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
goto out;
}
/* we unpack by finding the page with the tail, and calling
** __reiserfs_write_begin on that page. This will force a
** reiserfs_get_block to unpack the tail for us.
/*
* we unpack by finding the page with the tail, and calling
* __reiserfs_write_begin on that page. This will force a
* reiserfs_get_block to unpack the tail for us.
*/
index = inode->i_size >> PAGE_CACHE_SHIFT;
mapping = inode->i_mapping;
......
......@@ -5,15 +5,17 @@
#include <linux/time.h>
#include "reiserfs.h"
// this contains item handlers for old item types: sd, direct,
// indirect, directory
/*
* this contains item handlers for old item types: sd, direct,
* indirect, directory
*/
/* and where are the comments? how about saying where we can find an
explanation of each item handler method? -Hans */
/*
* and where are the comments? how about saying where we can find an
* explanation of each item handler method? -Hans
*/
//////////////////////////////////////////////////////////////////////////////
// stat data functions
//
/* stat data functions */
static int sd_bytes_number(struct item_head *ih, int block_size)
{
return 0;
......@@ -60,7 +62,7 @@ static void sd_print_item(struct item_head *ih, char *item)
static void sd_check_item(struct item_head *ih, char *item)
{
// FIXME: type something here!
/* unused */
}
static int sd_create_vi(struct virtual_node *vn,
......@@ -68,7 +70,6 @@ static int sd_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_STAT_DATA;
//vi->vi_type |= VI_TYPE_STAT_DATA;// not needed?
return 0;
}
......@@ -117,15 +118,13 @@ static struct item_operations stat_data_ops = {
.print_vi = sd_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// direct item functions
//
/* direct item functions */
static int direct_bytes_number(struct item_head *ih, int block_size)
{
return ih_item_len(ih);
}
// FIXME: this should probably switch to indirect as well
/* FIXME: this should probably switch to indirect as well */
static void direct_decrement_key(struct cpu_key *key)
{
cpu_key_k_offset_dec(key);
......@@ -144,7 +143,7 @@ static void direct_print_item(struct item_head *ih, char *item)
{
int j = 0;
// return;
/* return; */
printk("\"");
while (j < ih_item_len(ih))
printk("%c", item[j++]);
......@@ -153,7 +152,7 @@ static void direct_print_item(struct item_head *ih, char *item)
static void direct_check_item(struct item_head *ih, char *item)
{
// FIXME: type something here!
/* unused */
}
static int direct_create_vi(struct virtual_node *vn,
......@@ -161,7 +160,6 @@ static int direct_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_DIRECT;
//vi->vi_type |= VI_TYPE_DIRECT;
return 0;
}
......@@ -211,16 +209,13 @@ static struct item_operations direct_ops = {
.print_vi = direct_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// indirect item functions
//
/* indirect item functions */
static int indirect_bytes_number(struct item_head *ih, int block_size)
{
return ih_item_len(ih) / UNFM_P_SIZE * block_size; //- get_ih_free_space (ih);
return ih_item_len(ih) / UNFM_P_SIZE * block_size;
}
// decrease offset, if it becomes 0, change type to stat data
/* decrease offset, if it becomes 0, change type to stat data */
static void indirect_decrement_key(struct cpu_key *key)
{
cpu_key_k_offset_dec(key);
......@@ -228,7 +223,7 @@ static void indirect_decrement_key(struct cpu_key *key)
set_cpu_key_k_type(key, TYPE_STAT_DATA);
}
// if it is not first item of the body, then it is mergeable
/* if it is not first item of the body, then it is mergeable */
static int indirect_is_left_mergeable(struct reiserfs_key *key,
unsigned long bsize)
{
......@@ -236,7 +231,7 @@ static int indirect_is_left_mergeable(struct reiserfs_key *key,
return (le_key_k_offset(version, key) != 1);
}
// printing of indirect item
/* printing of indirect item */
static void start_new_sequence(__u32 * start, int *len, __u32 new)
{
*start = new;
......@@ -295,7 +290,7 @@ static void indirect_print_item(struct item_head *ih, char *item)
static void indirect_check_item(struct item_head *ih, char *item)
{
// FIXME: type something here!
/* unused */
}
static int indirect_create_vi(struct virtual_node *vn,
......@@ -303,7 +298,6 @@ static int indirect_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_INDIRECT;
//vi->vi_type |= VI_TYPE_INDIRECT;
return 0;
}
......@@ -321,16 +315,19 @@ static int indirect_check_right(struct virtual_item *vi, int free)
return indirect_check_left(vi, free, 0, 0);
}
// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
/*
* return size in bytes of 'units' units. If first == 0 - calculate
* from the head (left), otherwise - from tail (right)
*/
static int indirect_part_size(struct virtual_item *vi, int first, int units)
{
// unit of indirect item is byte (yet)
/* unit of indirect item is byte (yet) */
return units;
}
static int indirect_unit_num(struct virtual_item *vi)
{
// unit of indirect item is byte (yet)
/* unit of indirect item is byte (yet) */
return vi->vi_item_len - IH_SIZE;
}
......@@ -356,10 +353,7 @@ static struct item_operations indirect_ops = {
.print_vi = indirect_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// direntry functions
//
/* direntry functions */
static int direntry_bytes_number(struct item_head *ih, int block_size)
{
reiserfs_warning(NULL, "vs-16090",
......@@ -428,7 +422,7 @@ static void direntry_check_item(struct item_head *ih, char *item)
int i;
struct reiserfs_de_head *deh;
// FIXME: type something here!
/* unused */
deh = (struct reiserfs_de_head *)item;
for (i = 0; i < ih_entry_count(ih); i++, deh++) {
;
......@@ -439,7 +433,8 @@ static void direntry_check_item(struct item_head *ih, char *item)
/*
* function returns old entry number in directory item in real node
* using new entry number in virtual item in virtual node */
* using new entry number in virtual item in virtual node
*/
static inline int old_entry_num(int is_affected, int virtual_entry_num,
int pos_in_item, int mode)
{
......@@ -463,9 +458,11 @@ static inline int old_entry_num(int is_affected, int virtual_entry_num,
return virtual_entry_num - 1;
}
/* Create an array of sizes of directory entries for virtual
item. Return space used by an item. FIXME: no control over
consuming of space used by this item handler */
/*
* Create an array of sizes of directory entries for virtual
* item. Return space used by an item. FIXME: no control over
* consuming of space used by this item handler
*/
static int direntry_create_vi(struct virtual_node *vn,
struct virtual_item *vi,
int is_affected, int insert_size)
......@@ -529,10 +526,10 @@ static int direntry_create_vi(struct virtual_node *vn,
}
//
// return number of entries which may fit into specified amount of
// free space, or -1 if free space is not enough even for 1 entry
//
/*
* return number of entries which may fit into specified amount of
* free space, or -1 if free space is not enough even for 1 entry
*/
static int direntry_check_left(struct virtual_item *vi, int free,
int start_skip, int end_skip)
{
......@@ -541,8 +538,8 @@ static int direntry_check_left(struct virtual_item *vi, int free,
struct direntry_uarea *dir_u = vi->vi_uarea;
for (i = start_skip; i < dir_u->entry_count - end_skip; i++) {
/* i-th entry doesn't fit into the remaining free space */
if (dir_u->entry_sizes[i] > free)
/* i-th entry doesn't fit into the remaining free space */
break;
free -= dir_u->entry_sizes[i];
......@@ -570,8 +567,8 @@ static int direntry_check_right(struct virtual_item *vi, int free)
struct direntry_uarea *dir_u = vi->vi_uarea;
for (i = dir_u->entry_count - 1; i >= 0; i--) {
/* i-th entry doesn't fit into the remaining free space */
if (dir_u->entry_sizes[i] > free)
/* i-th entry doesn't fit into the remaining free space */
break;
free -= dir_u->entry_sizes[i];
......@@ -643,9 +640,7 @@ static struct item_operations direntry_ops = {
.print_vi = direntry_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// Error catching functions to catch errors caused by incorrect item types.
//
/* Error catching functions to catch errors caused by incorrect item types. */
static int errcatch_bytes_number(struct item_head *ih, int block_size)
{
reiserfs_warning(NULL, "green-16001",
......@@ -685,8 +680,12 @@ static int errcatch_create_vi(struct virtual_node *vn,
{
reiserfs_warning(NULL, "green-16006",
"Invalid item type observed, run fsck ASAP");
return 0; // We might return -1 here as well, but it won't help as create_virtual_node() from where
// this operation is called from is of return type void.
/*
* We might return -1 here as well, but it won't help:
* create_virtual_node(), from which this operation is
* called, has return type void.
*/
return 0;
}
static int errcatch_check_left(struct virtual_item *vi, int free,
......@@ -739,9 +738,6 @@ static struct item_operations errcatch_ops = {
errcatch_print_vi
};
//////////////////////////////////////////////////////////////////////////////
//
//
#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
#error Item types must use disk-format assigned values.
#endif
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -7,7 +7,7 @@
#include <linux/time.h>
#include "reiserfs.h"
// find where objectid map starts
/* find where objectid map starts */
#define objectid_map(s,rs) (old_format_only (s) ? \
(__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
(__le32 *)((rs) + 1))
......@@ -20,7 +20,7 @@ static void check_objectid_map(struct super_block *s, __le32 * map)
reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
(long unsigned int)le32_to_cpu(map[0]));
// FIXME: add something else here
/* FIXME: add something else here */
}
#else
......@@ -29,19 +29,21 @@ static void check_objectid_map(struct super_block *s, __le32 * map)
}
#endif
/* When we allocate objectids we allocate the first unused objectid.
Each sequence of objectids in use (the odd sequences) is followed
by a sequence of objectids not in use (the even sequences). We
only need to record the last objectid in each of these sequences
(both the odd and even sequences) in order to fully define the
boundaries of the sequences. A consequence of allocating the first
objectid not in use is that under most conditions this scheme is
extremely compact. The exception is immediately after a sequence
of operations which deletes a large number of objects of
non-sequential objectids, and even then it will become compact
again as soon as more objects are created. Note that many
interesting optimizations of layout could result from complicating
objectid assignment, but we have deferred making them for now. */
/*
* When we allocate objectids we allocate the first unused objectid.
* Each sequence of objectids in use (the odd sequences) is followed
* by a sequence of objectids not in use (the even sequences). We
* only need to record the last objectid in each of these sequences
* (both the odd and even sequences) in order to fully define the
* boundaries of the sequences. A consequence of allocating the first
* objectid not in use is that under most conditions this scheme is
* extremely compact. The exception is immediately after a sequence
* of operations which deletes a large number of objects of
* non-sequential objectids, and even then it will become compact
* again as soon as more objects are created. Note that many
* interesting optimizations of layout could result from complicating
* objectid assignment, but we have deferred making them for now.
*/
/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
......@@ -64,19 +66,23 @@ __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
return 0;
}
/* This incrementation allocates the first unused objectid. That
is to say, the first entry on the objectid map is the first
unused objectid, and by incrementing it we use it. See below
where we check to see if we eliminated a sequence of unused
objectids.... */
/*
* This incrementation allocates the first unused objectid. That
* is to say, the first entry on the objectid map is the first
* unused objectid, and by incrementing it we use it. See below
* where we check to see if we eliminated a sequence of unused
* objectids....
*/
map[1] = cpu_to_le32(unused_objectid + 1);
/* Now we check to see if we eliminated the last remaining member of
the first even sequence (and can eliminate the sequence by
eliminating its last objectid from oids), and can collapse the
first two odd sequences into one sequence. If so, then the net
result is to eliminate a pair of objectids from oids. We do this
by shifting the entire map to the left. */
/*
* Now we check to see if we eliminated the last remaining member of
* the first even sequence (and can eliminate the sequence by
* eliminating its last objectid from oids), and can collapse the
* first two odd sequences into one sequence. If so, then the net
* result is to eliminate a pair of objectids from oids. We do this
* by shifting the entire map to the left.
*/
if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
memmove(map + 1, map + 3,
(sb_oid_cursize(rs) - 3) * sizeof(__u32));
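A user-space sketch of the boundary-encoded map the comments above describe: map[1] is always the first unused objectid, and when incrementing it closes the first free gap, a pair of boundaries is dropped by shifting the map left. The plain u32 array and the sample values are simplifying assumptions (no endianness handling, no on-disk layout):

#include <stdio.h>

static unsigned int alloc_objectid(unsigned int *map, int *cursize)
{
        unsigned int id = map[1];       /* first unused objectid */

        map[1]++;                       /* use it */
        if (*cursize > 2 && map[1] == map[2]) {
                /* free gap closed: merge the first two used runs */
                for (int i = 1; i < *cursize - 2; i++)
                        map[i] = map[i + 2];
                *cursize -= 2;
        }
        return id;
}

int main(void)
{
        /* ids 1..3 in use, 4..5 free, 6.. in use */
        unsigned int map[8] = { 1, 4, 6, 0xffffffffu };
        int cursize = 4;

        printf("%u\n", alloc_objectid(map, &cursize)); /* 4 */
        printf("%u\n", alloc_objectid(map, &cursize)); /* 5; map shrinks */
        printf("cursize=%d\n", cursize);               /* 2 */
        return 0;
}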
......@@ -97,30 +103,33 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
int i = 0;
BUG_ON(!th->t_trans_id);
//return;
/*return; */
check_objectid_map(s, map);
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
/* start at the beginning of the objectid map (i = 0) and go to
the end of it (i = disk_sb->s_oid_cursize). Linear search is
what we use, though it is possible that binary search would be
more efficient after performing lots of deletions (which is
when oids is large.) We only check even i's. */
/*
* start at the beginning of the objectid map (i = 0) and go to
* the end of it (i = disk_sb->s_oid_cursize). Linear search is
* what we use, though it is possible that binary search would be
* more efficient after performing lots of deletions (which is
* when oids is large.) We only check even i's.
*/
while (i < sb_oid_cursize(rs)) {
if (objectid_to_release == le32_to_cpu(map[i])) {
/* This incrementation unallocates the objectid. */
//map[i]++;
le32_add_cpu(&map[i], 1);
/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
/*
* Did we unallocate the last member of an
* odd sequence, and can shrink oids?
*/
if (map[i] == map[i + 1]) {
/* shrink objectid map */
memmove(map + i, map + i + 2,
(sb_oid_cursize(rs) - i -
2) * sizeof(__u32));
//disk_sb->s_oid_cursize -= 2;
set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
RFALSE(sb_oid_cursize(rs) < 2 ||
......@@ -135,14 +144,19 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
objectid_to_release < le32_to_cpu(map[i + 1])) {
/* size of objectid map is not changed */
if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
//objectid_map[i+1]--;
le32_add_cpu(&map[i + 1], -1);
return;
}
/* JDM comparing two little-endian values for equality -- safe */
/*
* JDM comparing two little-endian values for
* equality -- safe
*/
/*
* objectid map must be expanded, but
* there is no space
*/
if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
/* objectid map must be expanded, but there is no space */
PROC_INFO_INC(s, leaked_oid);
return;
}
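The release side shown in this hunk follows three cases: freeing the head of an in-use run, freeing its tail, or splitting it in the middle (which needs room for one more boundary pair, else the objectid is leaked as above). A hedged companion to the allocation sketch, with a made-up helper name and no journal or endian handling:

#include <string.h>

/* Free `id`, assumed to lie inside the in-use run [map[i], map[i+1]). */
static void release_objectid(unsigned int *map, int *cursize, int maxsize,
			     unsigned int id)
{
	int i;

	for (i = 0; i < *cursize; i += 2) {
		if (id == map[i]) {
			map[i]++;	/* shrink the run from the front */
			if (map[i] == map[i + 1]) {
				/* run emptied: drop the boundary pair */
				memmove(map + i, map + i + 2,
					(*cursize - i - 2) * sizeof(*map));
				*cursize -= 2;
			}
			return;
		}
		if (map[i] < id && id < map[i + 1]) {
			if (id + 1 == map[i + 1]) {
				map[i + 1]--;	/* shrink from the back */
				return;
			}
			if (*cursize == maxsize)
				return;	/* no room to expand: leak it */
			/* split the run: insert a free/used boundary */
			memmove(map + i + 3, map + i + 1,
				(*cursize - i - 1) * sizeof(*map));
			map[i + 1] = id;
			map[i + 2] = id + 1;
			*cursize += 2;
			return;
		}
	}
}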
......@@ -178,8 +192,9 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
new_objectid_map = (__le32 *) (disk_sb + 1);
if (cur_size > new_size) {
/* mark everyone used that was listed as free at the end of the objectid
** map
/*
* mark everyone used that was listed as free at
* the end of the objectid map
*/
objectid_map[new_size - 1] = objectid_map[cur_size - 1];
set_sb_oid_cursize(disk_sb, new_size);
......
......@@ -172,18 +172,19 @@ static char *is_there_reiserfs_struct(char *fmt, int *what)
return k;
}
/* debugging reiserfs we used to print out a lot of different
variables, like keys, item headers, buffer heads etc. Values of
most fields matter. So it took a long time just to write
appropriative printk. With this reiserfs_warning you can use format
specification for complex structures like you used to do with
printfs for integers, doubles and pointers. For instance, to print
out key structure you have to write just:
reiserfs_warning ("bad key %k", key);
instead of
printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
key->k_offset, key->k_uniqueness);
*/
/*
* When debugging reiserfs we used to print out a lot of different
* variables, like keys, item headers, buffer heads etc. Values of
* most fields matter. So it took a long time just to write an
* appropriate printk. With this reiserfs_warning you can use format
* specification for complex structures like you used to do with
* printfs for integers, doubles and pointers. For instance, to print
* out key structure you have to write just:
* reiserfs_warning ("bad key %k", key);
* instead of
* printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
* key->k_offset, key->k_uniqueness);
*/
static DEFINE_SPINLOCK(error_lock);
static void prepare_error_buf(const char *fmt, va_list args)
{
......@@ -243,15 +244,16 @@ static void prepare_error_buf(const char *fmt, va_list args)
}
/* in addition to usual conversion specifiers this accepts reiserfs
specific conversion specifiers:
%k to print little endian key,
%K to print cpu key,
%h to print item_head,
%t to print directory entry
%z to print block head (arg must be struct buffer_head *
%b to print buffer_head
*/
/*
* in addition to usual conversion specifiers this accepts reiserfs
* specific conversion specifiers:
* %k to print little endian key,
* %K to print cpu key,
* %h to print item_head,
* %t to print directory entry
* %z to print block head (arg must be struct buffer_head *)
* %b to print buffer_head
*/
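As a usage sketch, two hypothetical call sites (the id strings and variable names are invented; each specifier consumes one pointer argument, e.g. %z and %b both take a struct buffer_head *):

reiserfs_warning(s, "xx-0001", "bad key %k in item %h", key, ih);
reiserfs_warning(s, "xx-0002", "block head %z of buffer %b", bh, bh);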
#define do_reiserfs_warning(fmt)\
{\
......@@ -304,50 +306,52 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
#endif
}
/* The format:
maintainer-errorid: [function-name:] message
where errorid is unique to the maintainer and function-name is
optional, is recommended, so that anyone can easily find the bug
with a simple grep for the short to type string
maintainer-errorid. Don't bother with reusing errorids, there are
lots of numbers out there.
Example:
reiserfs_panic(
p_sb, "reiser-29: reiserfs_new_blocknrs: "
"one of search_start or rn(%d) is equal to MAX_B_NUM,"
"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
rn, bh
);
Regular panic()s sometimes clear the screen before the message can
be read, thus the need for the while loop.
Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
pointless complexity):
panics in reiserfs.h have numbers from 1000 to 1999
super.c 2000 to 2999
preserve.c (unused) 3000 to 3999
bitmap.c 4000 to 4999
stree.c 5000 to 5999
prints.c 6000 to 6999
namei.c 7000 to 7999
fix_nodes.c 8000 to 8999
dir.c 9000 to 9999
lbalance.c 10000 to 10999
ibalance.c 11000 to 11999 not ready
do_balan.c 12000 to 12999
inode.c 13000 to 13999
file.c 14000 to 14999
objectid.c 15000 - 15999
buffer.c 16000 - 16999
symlink.c 17000 - 17999
. */
/*
* The format:
*
* maintainer-errorid: [function-name:] message
*
* where errorid is unique to the maintainer and function-name is
* optional, is recommended, so that anyone can easily find the bug
* with a simple grep for the short-to-type string
* maintainer-errorid. Don't bother with reusing errorids, there are
* lots of numbers out there.
*
* Example:
*
* reiserfs_panic(
* p_sb, "reiser-29: reiserfs_new_blocknrs: "
* "one of search_start or rn(%d) is equal to MAX_B_NUM,"
* "which means that we are optimizing location based on the "
* "bogus location of a temp buffer (%p).",
* rn, bh
* );
*
* Regular panic()s sometimes clear the screen before the message can
* be read, thus the need for the while loop.
*
* Numbering scheme for panic used by Vladimir and Anatoly (Hans completely
* ignores this scheme, and considers it pointless complexity):
*
* panics in reiserfs_fs.h have numbers from 1000 to 1999
* super.c 2000 to 2999
* preserve.c (unused) 3000 to 3999
* bitmap.c 4000 to 4999
* stree.c 5000 to 5999
* prints.c 6000 to 6999
* namei.c 7000 to 7999
* fix_nodes.c 8000 to 8999
* dir.c 9000 to 9999
* lbalance.c 10000 to 10999
* ibalance.c 11000 to 11999 not ready
* do_balan.c 12000 to 12999
* inode.c 13000 to 13999
* file.c 14000 to 14999
* objectid.c 15000 - 15999
* buffer.c 16000 - 16999
* symlink.c 17000 - 17999
*
*/
void __reiserfs_panic(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
......@@ -411,9 +415,11 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
reiserfs_abort_journal(sb, errno);
}
/* this prints internal nodes (4 keys/items in line) (dc_number,
dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
dc_size)...*/
/*
* this prints internal nodes (4 keys/items in line) (dc_number,
* dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
* dc_size)...
*/
static int print_internal(struct buffer_head *bh, int first, int last)
{
struct reiserfs_key *key;
......@@ -543,9 +549,11 @@ static int print_super_block(struct buffer_head *bh)
printk("Block count %u\n", sb_block_count(rs));
printk("Blocksize %d\n", sb_blocksize(rs));
printk("Free blocks %u\n", sb_free_blocks(rs));
// FIXME: this would be confusing if
// someone stores reiserfs super block in some data block ;)
/*
* FIXME: this would be confusing if
* someone stores reiserfs super block in some data block ;)
* skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
*/
skipped = bh->b_blocknr;
data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
(!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) +
......@@ -581,8 +589,8 @@ static int print_desc_block(struct buffer_head *bh)
return 0;
}
void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int last)
/* ..., int print_mode, int first, int last) */
void print_block(struct buffer_head *bh, ...)
{
va_list args;
int mode, first, last;
......
This diff is collapsed.
......@@ -53,8 +53,10 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
}
bforget(bh);
/* old disk layout detection; those partitions can be mounted, but
* cannot be resized */
/*
* old disk layout detection; those partitions can be mounted, but
* cannot be resized
*/
if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
!= REISERFS_DISK_OFFSET_IN_BYTES) {
printk
......@@ -86,12 +88,14 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
return -ENOMEM;
}
/* the new journal bitmaps are zero filled, now we copy in the bitmap
** node pointers from the old journal bitmap structs, and then
** transfer the new data structures into the journal struct.
**
** using the copy_size var below allows this code to work for
** both shrinking and expanding the FS.
/*
* the new journal bitmaps are zero filled, now we copy in
* the bitmap node pointers from the old journal bitmap
* structs, and then transfer the new data structures
* into the journal struct.
*
* using the copy_size var below allows this code to work for
* both shrinking and expanding the FS.
*/
copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
copy_size =
......@@ -101,36 +105,45 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
jb = SB_JOURNAL(s)->j_list_bitmap + i;
memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
/* just in case vfree schedules on us, copy the new
** pointer into the journal struct before freeing the
** old one
/*
* just in case vfree schedules on us, copy the new
* pointer into the journal struct before freeing the
* old one
*/
node_tmp = jb->bitmaps;
jb->bitmaps = jbitmap[i].bitmaps;
vfree(node_tmp);
}
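Reduced to a minimal sketch with a hypothetical new_bitmaps pointer, the ordering the comment insists on is publish first, free second:

void *old = jb->bitmaps;	/* keep the pointer being replaced */
jb->bitmaps = new_bitmaps;	/* publish the replacement first */
vfree(old);			/* a reschedule inside vfree is now harmless */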
/* allocate additional bitmap blocks, reallocate array of bitmap
* block pointers */
/*
* allocate additional bitmap blocks, reallocate
* array of bitmap block pointers
*/
bitmap =
vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
if (!bitmap) {
/* Journal bitmaps are still supersized, but the memory isn't
* leaked, so I guess it's ok */
/*
* Journal bitmaps are still supersized, but the
* memory isn't leaked, so I guess it's ok
*/
printk("reiserfs_resize: unable to allocate memory.\n");
return -ENOMEM;
}
for (i = 0; i < bmap_nr; i++)
bitmap[i] = old_bitmap[i];
/* This doesn't go through the journal, but it doesn't have to.
* The changes are still atomic: We're synced up when the journal
* transaction begins, and the new bitmaps don't matter if the
* transaction fails. */
/*
* This doesn't go through the journal, but it doesn't have to.
* The changes are still atomic: We're synced up when the
* journal transaction begins, and the new bitmaps don't
* matter if the transaction fails.
*/
for (i = bmap_nr; i < bmap_nr_new; i++) {
int depth;
/* don't use read_bitmap_block since it will cache
* the uninitialized bitmap */
/*
* don't use read_bitmap_block since it will cache
* the uninitialized bitmap
*/
depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, i * s->s_blocksize * 8);
reiserfs_write_lock_nested(s, depth);
......@@ -147,7 +160,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(bh);
reiserfs_write_lock_nested(s, depth);
// update bitmap_info stuff
/* update bitmap_info stuff */
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
}
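For scale: each bitmap block maps s_blocksize * 8 blocks (32768 with a 4 KiB blocksize), which is why the new bitmap block above is read at block number i * s_blocksize * 8; its free_count starts at s_blocksize * 8 - 1 = 32767 because the block's own bit is set during the initialization elided at the hunk boundary.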
......@@ -156,9 +169,11 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
vfree(old_bitmap);
}
/* begin transaction, if there was an error, it's fine. Yes, we have
/*
* begin transaction, if there was an error, it's fine. Yes, we have
* incorrect bitmaps now, but none of it is ever going to touch the
* disk anyway. */
* disk anyway.
*/
err = journal_begin(&th, s, 10);
if (err)
return err;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -56,9 +56,11 @@
#define XAROOT_NAME "xattrs"
/* Helpers for inode ops. We do this so that we don't have all the VFS
/*
* Helpers for inode ops. We do this so that we don't have all the VFS
* overhead and also for proper i_mutex annotation.
* dir->i_mutex must be held for all of them. */
* dir->i_mutex must be held for all of them.
*/
#ifdef CONFIG_REISERFS_FS_XATTR
static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
{
......@@ -73,10 +75,12 @@ static int xattr_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return dir->i_op->mkdir(dir, dentry, mode);
}
/* We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
/*
* We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
* mutation ops aren't called during rename or splice, which are the
* only other users of I_MUTEX_CHILD. It violates the ordering, but that's
* better than allocating another subclass just for this code. */
* better than allocating another subclass just for this code.
*/
static int xattr_unlink(struct inode *dir, struct dentry *dentry)
{
int error;
......@@ -166,9 +170,11 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
return xadir;
}
/* The following are side effects of other operations that aren't explicitly
/*
* The following are side effects of other operations that aren't explicitly
* modifying extended attributes. This includes operations such as permissions
* or ownership changes, object deletions, etc. */
* or ownership changes, object deletions, etc.
*/
struct reiserfs_dentry_buf {
struct dir_context ctx;
struct dentry *xadir;
......@@ -267,11 +273,13 @@ static int reiserfs_for_each_xattr(struct inode *inode,
cleanup_dentry_buf(&buf);
if (!err) {
/* We start a transaction here to avoid a ABBA situation
/*
* We start a transaction here to avoid an ABBA situation
* between the xattr root's i_mutex and the journal lock.
* This doesn't incur much additional overhead since the
* new transaction will just nest inside the
* outer transaction. */
* outer transaction.
*/
int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
......@@ -349,9 +357,11 @@ int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs)
}
#ifdef CONFIG_REISERFS_FS_XATTR
/* Returns a dentry corresponding to a specific extended attribute file
/*
* Returns a dentry corresponding to a specific extended attribute file
* for the inode. If flags allow, the file is created. Otherwise, a
* valid or negative dentry, or an error is returned. */
* valid or negative dentry, or an error is returned.
*/
static struct dentry *xattr_lookup(struct inode *inode, const char *name,
int flags)
{
......@@ -400,8 +410,10 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
{
struct address_space *mapping = dir->i_mapping;
struct page *page;
/* We can deadlock if we try to free dentries,
and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
/*
* We can deadlock if we try to free dentries,
* and an unlink/rmdir has just occurred - GFP_NOFS avoids this
*/
mapping_set_gfp_mask(mapping, GFP_NOFS);
page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
if (!IS_ERR(page)) {
......@@ -615,8 +627,10 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
if (name == NULL)
return -EINVAL;
/* We can't have xattrs attached to v1 items since they don't have
* generation numbers */
/*
* We can't have xattrs attached to v1 items since they don't have
* generation numbers
*/
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
......@@ -913,12 +927,16 @@ static const struct xattr_handler *reiserfs_xattr_handlers[] = {
static int xattr_mount_check(struct super_block *s)
{
/* We need generation numbers to ensure that the oid mapping is correct
* v3.5 filesystems don't have them. */
/*
* We need generation numbers to ensure that the oid mapping is correct;
* v3.5 filesystems don't have them.
*/
if (old_format_only(s)) {
if (reiserfs_xattrs_optional(s)) {
/* Old format filesystem, but optional xattrs have
* been enabled. Error out. */
/*
* Old format filesystem, but optional xattrs have
* been enabled. Error out.
*/
reiserfs_warning(s, "jdm-2005",
"xattrs/ACLs not supported "
"on pre-v3.6 format filesystems. "
......@@ -972,9 +990,11 @@ int reiserfs_lookup_privroot(struct super_block *s)
return err;
}
/* We need to take a copy of the mount flags since things like
/*
* We need to take a copy of the mount flags since things like
* MS_RDONLY don't get set until *after* we're called.
* mount_flags != mount_options */
* mount_flags != mount_options
*/
int reiserfs_xattr_init(struct super_block *s, int mount_flags)
{
int err = 0;
......
......@@ -61,7 +61,8 @@ static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
return ret;
}
/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
/*
* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
* Let's try to be smart about it.
* xattr root: We cache it. If it's not cached, we may need to create it.
* xattr dir: If anything has been loaded for this inode, we can set a flag
......
......@@ -25,8 +25,10 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int size = acl ? posix_acl_xattr_size(acl->a_count) : 0;
/* Pessimism: We can't assume that anything from the xattr root up
* has been created. */
/*
* Pessimism: We can't assume that anything from the xattr root up
* has been created.
*/
jcreate_blocks = reiserfs_xattr_jcreate_nblocks(inode) +
reiserfs_xattr_nblocks(inode, size) * 2;
......@@ -208,8 +210,10 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
retval = reiserfs_xattr_get(inode, name, value, size);
if (retval == -ENODATA || retval == -ENOSYS) {
/* This shouldn't actually happen as it should have
been caught above.. but just in case */
/*
* This shouldn't actually happen as it should have
* been caught above, but just in case
*/
acl = NULL;
} else if (retval < 0) {
acl = ERR_PTR(retval);
......@@ -290,8 +294,10 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
return error;
}
/* dir->i_mutex: locked,
* inode is new and not released into the wild yet */
/*
* dir->i_mutex: locked,
* inode is new and not released into the wild yet
*/
int
reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
struct inode *dir, struct dentry *dentry,
......@@ -304,14 +310,18 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
if (S_ISLNK(inode->i_mode))
return 0;
/* ACLs can only be used on "new" objects, so if it's an old object
* there is nothing to inherit from */
/*
* ACLs can only be used on "new" objects, so if it's an old object
* there is nothing to inherit from
*/
if (get_inode_sd_version(dir) == STAT_DATA_V1)
goto apply_umask;
/* Don't apply ACLs to objects in the .reiserfs_priv tree.. This
/*
* Don't apply ACLs to objects in the .reiserfs_priv tree. This
* would be useless since permissions are ignored, and a pain because
* it introduces locking cycles */
* it introduces locking cycles
*/
if (IS_PRIVATE(dir)) {
inode->i_flags |= S_PRIVATE;
goto apply_umask;
......