Commit 2240a7bb authored by Linus Torvalds

Merge tag 'tytso-for-linus-20111214' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* tag 'tytso-for-linus-20111214' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: handle EOF correctly in ext4_bio_write_page()
  ext4: remove a wrong BUG_ON in ext4_ext_convert_to_initialized
  ext4: correctly handle pages w/o buffers in ext4_discard_partial_buffers()
  ext4: avoid potential hang in mpage_submit_io() when blocksize < pagesize
  ext4: avoid hangs in ext4_da_should_update_i_disksize()
  ext4: display the correct mount option in /proc/mounts for [no]init_itable
  ext4: Fix crash due to getting bogus eh_depth value on big-endian systems
  ext4: fix ext4_end_io_dio() racing against fsync()

.. using the new signed tag merge of git that now verifies the gpg
signature automatically.  Yay.  The branchname was just 'dev', which is
prettier.  I'll tell Ted to use nicer tag names for future cases.
...@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, ...@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh))); ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
neh->eh_depth = cpu_to_le16(neh->eh_depth + 1); neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
ext4_mark_inode_dirty(handle, inode); ext4_mark_inode_dirty(handle, inode);
out: out:
brelse(bh); brelse(bh);
...@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ...@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
/* Pre-conditions */ /* Pre-conditions */
BUG_ON(!ext4_ext_is_uninitialized(ex)); BUG_ON(!ext4_ext_is_uninitialized(ex));
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
/* /*
* Attempt to transfer newly initialized blocks from the currently * Attempt to transfer newly initialized blocks from the currently
......
...@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd, ...@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_unwritten(bh); clear_buffer_unwritten(bh);
} }
/* skip page if block allocation undone */ /*
if (buffer_delay(bh) || buffer_unwritten(bh)) * skip page if block allocation undone and
* block is dirty
*/
if (ext4_bh_delay_or_unwritten(NULL, bh))
skip_page = 1; skip_page = 1;
bh = bh->b_this_page; bh = bh->b_this_page;
block_start += bh->b_size; block_start += bh->b_size;
...@@ -2387,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, ...@@ -2387,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index; pgoff_t index;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
handle_t *handle; handle_t *handle;
loff_t page_len;
index = pos >> PAGE_CACHE_SHIFT; index = pos >> PAGE_CACHE_SHIFT;
...@@ -2434,13 +2436,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, ...@@ -2434,13 +2436,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
*/ */
if (pos + len > inode->i_size) if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode); ext4_truncate_failed_write(inode);
} else {
page_len = pos & (PAGE_CACHE_SIZE - 1);
if (page_len > 0) {
ret = ext4_discard_partial_page_buffers_no_lock(handle,
inode, page, pos - page_len, page_len,
EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
}
} }
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
...@@ -2483,7 +2478,6 @@ static int ext4_da_write_end(struct file *file, ...@@ -2483,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
loff_t new_i_size; loff_t new_i_size;
unsigned long start, end; unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata; int write_mode = (int)(unsigned long)fsdata;
loff_t page_len;
if (write_mode == FALL_BACK_TO_NONDELALLOC) { if (write_mode == FALL_BACK_TO_NONDELALLOC) {
if (ext4_should_order_data(inode)) { if (ext4_should_order_data(inode)) {
...@@ -2508,7 +2502,7 @@ static int ext4_da_write_end(struct file *file, ...@@ -2508,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
*/ */
new_i_size = pos + copied; new_i_size = pos + copied;
if (new_i_size > EXT4_I(inode)->i_disksize) { if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_da_should_update_i_disksize(page, end)) { if (ext4_da_should_update_i_disksize(page, end)) {
down_write(&EXT4_I(inode)->i_data_sem); down_write(&EXT4_I(inode)->i_data_sem);
if (new_i_size > EXT4_I(inode)->i_disksize) { if (new_i_size > EXT4_I(inode)->i_disksize) {
...@@ -2532,16 +2526,6 @@ static int ext4_da_write_end(struct file *file, ...@@ -2532,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
} }
ret2 = generic_write_end(file, mapping, pos, len, copied, ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata); page, fsdata);
page_len = PAGE_CACHE_SIZE -
((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
if (page_len > 0) {
ret = ext4_discard_partial_page_buffers_no_lock(handle,
inode, page, pos + copied - 1, page_len,
EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
}
copied = ret2; copied = ret2;
if (ret2 < 0) if (ret2 < 0)
ret = ret2; ret = ret2;
...@@ -2781,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, ...@@ -2781,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
iocb->private, io_end->inode->i_ino, iocb, offset, iocb->private, io_end->inode->i_ino, iocb, offset,
size); size);
iocb->private = NULL;
/* if not aio dio with unwritten extents, just free io and return */ /* if not aio dio with unwritten extents, just free io and return */
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end); ext4_free_io_end(io_end);
iocb->private = NULL;
out: out:
if (is_async) if (is_async)
aio_complete(iocb, ret, 0); aio_complete(iocb, ret, 0);
...@@ -2807,7 +2792,6 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, ...@@ -2807,7 +2792,6 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
/* queue the work to convert unwritten extents to written */ /* queue the work to convert unwritten extents to written */
iocb->private = NULL;
queue_work(wq, &io_end->work); queue_work(wq, &io_end->work);
/* XXX: probably should move into the real I/O completion handler */ /* XXX: probably should move into the real I/O completion handler */
...@@ -3203,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, ...@@ -3203,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page)) { if (!page_has_buffers(page))
/* create_empty_buffers(page, blocksize, 0);
* If the range to be discarded covers a partial block
* we need to get the page buffers. This is because
* partial blocks cannot be released and the page needs
* to be updated with the contents of the block before
* we write the zeros on top of it.
*/
if ((from & (blocksize - 1)) ||
((from + length) & (blocksize - 1))) {
create_empty_buffers(page, blocksize, 0);
} else {
/*
* If there are no partial blocks,
* there is nothing to update,
* so we can return now
*/
return 0;
}
}
/* Find the buffer that contains "offset" */ /* Find the buffer that contains "offset" */
bh = page_buffers(page); bh = page_buffers(page);
......
...@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io, ...@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
block_end = block_start + blocksize; block_end = block_start + blocksize;
if (block_start >= len) { if (block_start >= len) {
/*
* Comments copied from block_write_full_page_endio:
*
* The page straddles i_size. It must be zeroed out on
* each and every writepage invocation because it may
* be mmapped. "A file is mapped in multiples of the
* page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when
* mapped, and writes to that region are not written
* out to the file."
*/
zero_user_segment(page, block_start, block_end);
clear_buffer_dirty(bh); clear_buffer_dirty(bh);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
continue; continue;
......
...@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) ...@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",block_validity"); seq_puts(seq, ",block_validity");
if (!test_opt(sb, INIT_INODE_TABLE)) if (!test_opt(sb, INIT_INODE_TABLE))
seq_puts(seq, ",noinit_inode_table"); seq_puts(seq, ",noinit_itable");
else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT) else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
seq_printf(seq, ",init_inode_table=%u", seq_printf(seq, ",init_itable=%u",
(unsigned) sbi->s_li_wait_mult); (unsigned) sbi->s_li_wait_mult);
ext4_show_quota_options(seq, sb); ext4_show_quota_options(seq, sb);
...@@ -1333,8 +1333,7 @@ enum { ...@@ -1333,8 +1333,7 @@ enum {
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock, Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
Opt_init_inode_table, Opt_noinit_inode_table,
}; };
static const match_table_t tokens = { static const match_table_t tokens = {
...@@ -1407,9 +1406,9 @@ static const match_table_t tokens = { ...@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
{Opt_dioread_lock, "dioread_lock"}, {Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"}, {Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"}, {Opt_nodiscard, "nodiscard"},
{Opt_init_inode_table, "init_itable=%u"}, {Opt_init_itable, "init_itable=%u"},
{Opt_init_inode_table, "init_itable"}, {Opt_init_itable, "init_itable"},
{Opt_noinit_inode_table, "noinit_itable"}, {Opt_noinit_itable, "noinit_itable"},
{Opt_err, NULL}, {Opt_err, NULL},
}; };
...@@ -1892,7 +1891,7 @@ static int parse_options(char *options, struct super_block *sb, ...@@ -1892,7 +1891,7 @@ static int parse_options(char *options, struct super_block *sb,
case Opt_dioread_lock: case Opt_dioread_lock:
clear_opt(sb, DIOREAD_NOLOCK); clear_opt(sb, DIOREAD_NOLOCK);
break; break;
case Opt_init_inode_table: case Opt_init_itable:
set_opt(sb, INIT_INODE_TABLE); set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) { if (args[0].from) {
if (match_int(&args[0], &option)) if (match_int(&args[0], &option))
...@@ -1903,7 +1902,7 @@ static int parse_options(char *options, struct super_block *sb, ...@@ -1903,7 +1902,7 @@ static int parse_options(char *options, struct super_block *sb,
return 0; return 0;
sbi->s_li_wait_mult = option; sbi->s_li_wait_mult = option;
break; break;
case Opt_noinit_inode_table: case Opt_noinit_itable:
clear_opt(sb, INIT_INODE_TABLE); clear_opt(sb, INIT_INODE_TABLE);
break; break;
default: default:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register