Commit 5e38b72a authored by Linus Torvalds

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "Fix various bug fixes in ext4 caused by races and memory allocation
  failures"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix fdatasync(2) after extent manipulation operations
  ext4: fix data corruption for mmap writes
  ext4: fix data corruption with EXT4_GET_BLOCKS_ZERO
  ext4: fix quota charging for shared xattr blocks
  ext4: remove redundant check for encrypted file on dio write path
  ext4: remove unused d_name argument from ext4_search_dir() et al.
  ext4: fix off-by-one error when writing back pages before dio read
  ext4: fix off-by-one on max nr_pages in ext4_find_unwritten_pgoff()
  ext4: keep existing extra fields when inode expands
  ext4: handle the rest of ext4_mb_load_buddy() ENOMEM errors
  ext4: fix off-by-one in loop termination in ext4_find_unwritten_pgoff()
  ext4: fix SEEK_HOLE
  jbd2: preserve original nofs flag during journal restart
  ext4: clear lockdep subtype for quota files on quota off
@@ -4,6 +4,7 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
#include <linux/quotaops.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
@@ -232,6 +233,9 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
handle_t *handle;
int error, retries = 0;
error = dquot_initialize(inode);
if (error)
return error;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR,
ext4_jbd2_credits_xattr(inode));
......
@@ -2523,7 +2523,6 @@ extern int ext4_search_dir(struct buffer_head *bh,
int buf_size,
struct inode *dir,
struct ext4_filename *fname,
const struct qstr *d_name,
unsigned int offset,
struct ext4_dir_entry_2 **res_dir);
extern int ext4_generic_delete_entry(handle_t *handle,
@@ -3007,7 +3006,6 @@ extern int htree_inlinedir_to_tree(struct file *dir_file,
int *has_inline_data);
extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_filename *fname,
const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data);
extern int ext4_delete_inline_entry(handle_t *handle,
......
@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
struct ext4_extent zero_ex;
struct ext4_extent zero_ex1, zero_ex2;
struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len;
int allocated = 0, max_zeroout = 0;
int err = 0;
int split_flag = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
zero_ex.ee_len = 0;
zero_ex1.ee_len = 0;
zero_ex2.ee_len = 0;
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
if (ext4_encrypted_inode(inode))
max_zeroout = 0;
/* If extent is less than s_max_zeroout_kb, zeroout directly */
if (max_zeroout && (ee_len <= max_zeroout)) {
err = ext4_ext_zeroout(inode, ex);
if (err)
goto out;
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
ext4_ext_mark_initialized(ex);
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
/*
* four cases:
* five cases:
* 1. split the extent into three extents.
* 2. split the extent into two extents, zeroout the first half.
* 3. split the extent into two extents, zeroout the second half.
* 2. split the extent into two extents, zeroout the head of the first
* extent.
* 3. split the extent into two extents, zeroout the tail of the second
* extent.
* 4. split the extent into two extents with out zeroout.
* 5. no splitting needed, just possibly zeroout the head and / or the
* tail of the extent.
*/
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
if (max_zeroout && (allocated > map->m_len)) {
if (max_zeroout && (allocated > split_map.m_len)) {
if (allocated <= max_zeroout) {
/* case 3 */
zero_ex.ee_block =
cpu_to_le32(map->m_lblk);
zero_ex.ee_len = cpu_to_le16(allocated);
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex) + map->m_lblk - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex);
/* case 3 or 5 */
zero_ex1.ee_block =
cpu_to_le32(split_map.m_lblk +
split_map.m_len);
zero_ex1.ee_len =
cpu_to_le16(allocated - split_map.m_len);
ext4_ext_store_pblock(&zero_ex1,
ext4_ext_pblock(ex) + split_map.m_lblk +
split_map.m_len - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex1);
if (err)
goto out;
split_map.m_lblk = map->m_lblk;
split_map.m_len = allocated;
} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
/* case 2 */
if (map->m_lblk != ee_block) {
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(map->m_lblk -
}
if (split_map.m_lblk - ee_block + split_map.m_len <
max_zeroout) {
/* case 2 or 5 */
if (split_map.m_lblk != ee_block) {
zero_ex2.ee_block = ex->ee_block;
zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
ee_block);
ext4_ext_store_pblock(&zero_ex,
ext4_ext_store_pblock(&zero_ex2,
ext4_ext_pblock(ex));
err = ext4_ext_zeroout(inode, &zero_ex);
err = ext4_ext_zeroout(inode, &zero_ex2);
if (err)
goto out;
}
split_map.m_len += split_map.m_lblk - ee_block;
split_map.m_lblk = ee_block;
split_map.m_len = map->m_lblk - ee_block + map->m_len;
allocated = map->m_len;
}
}
@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
err = 0;
out:
/* If we have gotten a failure, don't zero out status tree */
if (!err)
err = ext4_zeroout_es(inode, &zero_ex);
if (!err) {
err = ext4_zeroout_es(inode, &zero_ex1);
if (!err)
err = ext4_zeroout_es(inode, &zero_ex2);
}
return err ? err : allocated;
}
@@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
@@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
@@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
......
@@ -474,57 +474,37 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
endoff = (loff_t)end_blk << blkbits;
index = startoff >> PAGE_SHIFT;
end = endoff >> PAGE_SHIFT;
end = (endoff - 1) >> PAGE_SHIFT;
pagevec_init(&pvec, 0);
do {
int i, num;
unsigned long nr_pages;
num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
if (nr_pages == 0) {
if (whence == SEEK_DATA)
break;
BUG_ON(whence != SEEK_HOLE);
/*
* If this is the first time to go into the loop and
* offset is not beyond the end offset, it will be a
* hole at this offset
*/
if (lastoff == startoff || lastoff < endoff)
found = 1;
break;
}
/*
* If this is the first time to go into the loop and
* offset is smaller than the first page offset, it will be a
* hole at this offset.
*/
if (lastoff == startoff && whence == SEEK_HOLE &&
lastoff < page_offset(pvec.pages[0])) {
found = 1;
if (nr_pages == 0)
break;
}
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
struct buffer_head *bh, *head;
/*
* If the current offset is not beyond the end of given
* range, it will be a hole.
* If current offset is smaller than the page offset,
* there is a hole at this offset.
*/
if (lastoff < endoff && whence == SEEK_HOLE &&
page->index > end) {
if (whence == SEEK_HOLE && lastoff < endoff &&
lastoff < page_offset(pvec.pages[i])) {
found = 1;
*offset = lastoff;
goto out;
}
if (page->index > end)
goto out;
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping)) {
@@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
unlock_page(page);
}
/*
* The no. of pages is less than our desired, that would be a
* hole in there.
*/
if (nr_pages < num && whence == SEEK_HOLE) {
found = 1;
*offset = lastoff;
/* The no. of pages is less than our desired, we are done. */
if (nr_pages < num)
break;
}
index = pvec.pages[i - 1]->index + 1;
pagevec_release(&pvec);
} while (index <= end);
if (whence == SEEK_HOLE && lastoff < endoff) {
found = 1;
*offset = lastoff;
}
out:
pagevec_release(&pvec);
return found;
......
@@ -1627,7 +1627,6 @@ int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent,
struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_filename *fname,
const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data)
{
@@ -1649,7 +1648,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
EXT4_INLINE_DOTDOT_SIZE;
inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
dir, fname, d_name, 0, res_dir);
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
if (ret < 0)
@@ -1662,7 +1661,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
dir, fname, d_name, 0, res_dir);
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
......
@@ -2124,15 +2124,29 @@ static int ext4_writepage(struct page *page,
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
int len;
loff_t size = i_size_read(mpd->inode);
loff_t size;
int err;
BUG_ON(page->index != mpd->first_page);
clear_page_dirty_for_io(page);
/*
* We have to be very careful here! Nothing protects writeback path
* against i_size changes and the page can be writeably mapped into
* page tables. So an application can be growing i_size and writing
* data through mmap while writeback runs. clear_page_dirty_for_io()
* write-protects our page in page tables and the page cannot get
* written to again until we release page lock. So only after
* clear_page_dirty_for_io() we are safe to sample i_size for
* ext4_bio_write_page() to zero-out tail of the written page. We rely
* on the barrier provided by TestClearPageDirty in
* clear_page_dirty_for_io() to make sure i_size is really sampled only
* after page tables are updated.
*/
size = i_size_read(mpd->inode);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
clear_page_dirty_for_io(page);
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
mpd->wbc->nr_to_write--;
@@ -3629,9 +3643,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block_func, ext4_end_io_dio, NULL,
dio_flags);
@@ -3713,7 +3724,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
*/
inode_lock_shared(inode);
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
iocb->ki_pos + count);
iocb->ki_pos + count - 1);
if (ret)
goto out_unlock;
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -4207,6 +4218,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
out_dio:
@@ -5637,8 +5650,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
new_extra_isize);
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
EXT4_I(inode)->i_extra_isize, 0,
new_extra_isize - EXT4_I(inode)->i_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
......
@@ -3887,7 +3887,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
ext4_error(sb, "Error loading buddy information for %u", group);
ext4_warning(sb, "Error %d loading buddy information for %u",
err, group);
put_bh(bitmap_bh);
return 0;
}
@@ -4044,10 +4045,11 @@ void ext4_discard_preallocations(struct inode *inode)
BUG_ON(pa->pa_type != MB_INODE_PA);
group = ext4_get_group_number(sb, pa->pa_pstart);
err = ext4_mb_load_buddy(sb, group, &e4b);
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
if (err) {
ext4_error(sb, "Error loading buddy information for %u",
group);
ext4_error(sb, "Error %d loading buddy information for %u",
err, group);
continue;
}
@@ -4303,11 +4305,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
spin_unlock(&lg->lg_prealloc_lock);
list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
int err;
group = ext4_get_group_number(sb, pa->pa_pstart);
if (ext4_mb_load_buddy(sb, group, &e4b)) {
ext4_error(sb, "Error loading buddy information for %u",
group);
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
if (err) {
ext4_error(sb, "Error %d loading buddy information for %u",
err, group);
continue;
}
ext4_lock_group(sb, group);
@@ -5127,8 +5132,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ret = ext4_mb_load_buddy(sb, group, &e4b);
if (ret) {
ext4_error(sb, "Error in loading buddy "
"information for %u", group);
ext4_warning(sb, "Error %d loading buddy information for %u",
ret, group);
return ret;
}
bitmap = e4b.bd_bitmap;
......
@@ -1155,12 +1155,11 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
static inline int search_dirblock(struct buffer_head *bh,
struct inode *dir,
struct ext4_filename *fname,
const struct qstr *d_name,
unsigned int offset,
struct ext4_dir_entry_2 **res_dir)
{
return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
fname, d_name, offset, res_dir);
fname, offset, res_dir);
}
/*
@@ -1262,7 +1261,6 @@ static inline bool ext4_match(const struct ext4_filename *fname,
*/
int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
struct inode *dir, struct ext4_filename *fname,
const struct qstr *d_name,
unsigned int offset, struct ext4_dir_entry_2 **res_dir)
{
struct ext4_dir_entry_2 * de;
@@ -1355,7 +1353,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
if (ext4_has_inline_data(dir)) {
int has_inline_data = 1;
ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir,
ret = ext4_find_inline_entry(dir, &fname, res_dir,
&has_inline_data);
if (has_inline_data) {
if (inlined)
@@ -1447,7 +1445,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto next;
}
set_buffer_verified(bh);
i = search_dirblock(bh, dir, &fname, d_name,
i = search_dirblock(bh, dir, &fname,
block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
if (i == 1) {
EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1488,7 +1486,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
{
struct super_block * sb = dir->i_sb;
struct dx_frame frames[2], *frame;
const struct qstr *d_name = fname->usr_fname;
struct buffer_head *bh;
ext4_lblk_t block;
int retval;
@@ -1505,7 +1502,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
if (IS_ERR(bh))
goto errout;
retval = search_dirblock(bh, dir, fname, d_name,
retval = search_dirblock(bh, dir, fname,
block << EXT4_BLOCK_SIZE_BITS(sb),
res_dir);
if (retval == 1)
@@ -1530,7 +1527,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
bh = NULL;
errout:
dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name));
success:
dx_release(frames);
return bh;
......
@@ -848,14 +848,9 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
{
int type;
if (ext4_has_feature_quota(sb)) {
dquot_disable(sb, -1,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
} else {
/* Use our quota_off function to clear inode flags etc. */
for (type = 0; type < EXT4_MAXQUOTAS; type++)
ext4_quota_off(sb, type);
}
/* Use our quota_off function to clear inode flags etc. */
for (type = 0; type < EXT4_MAXQUOTAS; type++)
ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
@@ -1179,6 +1174,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
return res;
}
res = dquot_initialize(inode);
if (res)
return res;
retry:
handle = ext4_journal_start(inode, EXT4_HT_MISC,
ext4_jbd2_credits_xattr(inode));
@@ -5485,7 +5483,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
goto out;
err = dquot_quota_off(sb, type);
if (err)
if (err || ext4_has_feature_quota(sb))
goto out_put;
inode_lock(inode);
@@ -5505,6 +5503,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
out_unlock:
inode_unlock(inode);
out_put:
lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
iput(inode);
return err;
out:
......
@@ -888,6 +888,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
else {
u32 ref;
WARN_ON_ONCE(dquot_initialize_needed(inode));
/* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
@@ -954,6 +956,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
/* We need to allocate a new block */
ext4_fsblk_t goal, block;
WARN_ON_ONCE(dquot_initialize_needed(inode));
goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
@@ -1166,6 +1170,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
return -EINVAL;
if (strlen(name) > 255)
return -ERANGE;
ext4_write_lock_xattr(inode, &no_expand);
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -1267,6 +1272,9 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
int error, retries = 0;
int credits = ext4_jbd2_credits_xattr(inode);
error = dquot_initialize(inode);
if (error)
return error;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle)) {
......
@@ -680,6 +680,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
handle->h_buffer_credits = nblocks;
/*
* Restore the original nofs context because the journal restart
* is basically the same thing as journal stop and start.
* start_this_handle will start a new nofs context.
*/
memalloc_nofs_restore(handle->saved_alloc_context);
ret = start_this_handle(journal, handle, gfp_mask);
return ret;
}
......
@@ -1512,6 +1512,22 @@ int dquot_initialize(struct inode *inode)
}
EXPORT_SYMBOL(dquot_initialize);
bool dquot_initialize_needed(struct inode *inode)
{
struct dquot **dquots;
int i;
if (!dquot_active(inode))
return false;
dquots = i_dquot(inode);
for (i = 0; i < MAXQUOTAS; i++)
if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
return true;
return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);
/*
* Release all quotas referenced by inode.
*
......
@@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
int dquot_initialize(struct inode *inode);
bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode)
return 0;
}
static inline bool dquot_initialize_needed(struct inode *inode)
{
return false;
}
static inline void dquot_drop(struct inode *inode)
{
}
......