Commit 0d9cf33b authored by Linus Torvalds

Merge tag 'for_v4.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

 - isofs memory leak fix

 - two fsnotify fixes for event mask handling

 - a udf fix for UTF-16 handling

 - a couple of other smaller cleanups

* tag 'for_v4.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  udf: Fix leak of UTF-16 surrogates into encoded strings
  fs: ext2: Adding new return type vm_fault_t
  isofs: fix potential memory leak in mount option parsing
  MAINTAINERS: add an entry for FSNOTIFY infrastructure
  fsnotify: fix typo in a comment about mark->g_list
  fsnotify: fix ignore mask logic in send_to_group()
  isofs compress: Remove VLA usage
  fs: quota: Replace GFP_ATOMIC with GFP_KERNEL in dquot_init
  fanotify: fix logic of events on child
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -5784,6 +5784,14 @@ F: fs/crypto/
 F: include/linux/fscrypt*.h
 F: Documentation/filesystems/fscrypt.rst
 
+FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
+M: Jan Kara <jack@suse.cz>
+R: Amir Goldstein <amir73il@gmail.com>
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+F: fs/notify/
+F: include/linux/fsnotify*.h
+
 FUJITSU LAPTOP EXTRAS
 M: Jonathan Woithe <jwoithe@just42.net>
 L: platform-driver-x86@vger.kernel.org
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
@@ -88,11 +88,11 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
  * The default page_lock and i_size verification done by non-DAX fault paths
  * is sufficient because ext2 doesn't support hole punching.
  */
-static int ext2_dax_fault(struct vm_fault *vmf)
+static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
 {
     struct inode *inode = file_inode(vmf->vma->vm_file);
     struct ext2_inode_info *ei = EXT2_I(inode);
-    int ret;
+    vm_fault_t ret;
 
     if (vmf->flags & FAULT_FLAG_WRITE) {
         sb_start_pagefault(inode->i_sb);
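The ext2 change above is part of the tree-wide conversion of page-fault handlers to a dedicated vm_fault_t return type: fault handlers return a bitmask of VM_FAULT_* codes, not a negative errno, and giving that bitmask its own type lets the compiler and static checkers catch call sites that confuse the two conventions. A minimal userspace sketch of the idea (the typedef and FAULT_* names are illustrative stand-ins, not kernel API):

    #include <stdio.h>

    /* Stand-ins for the kernel's vm_fault_t and VM_FAULT_* bit codes. */
    typedef unsigned int fault_t;
    #define FAULT_NOPAGE 0x0001u
    #define FAULT_SIGBUS 0x0002u

    /* A dedicated return type documents that the result is a bitmask of
     * fault codes rather than an errno-style int, so tools can flag any
     * call site that mixes the two. */
    static fault_t handle_fault(int is_write)
    {
        return is_write ? FAULT_SIGBUS : FAULT_NOPAGE;
    }

    int main(void)
    {
        printf("fault result: %#x\n", handle_fault(0));
        return 0;
    }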
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/zlib.h>
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
                            >> bufshift;
     int haveblocks;
     blkcnt_t blocknum;
-    struct buffer_head *bhs[needblocks + 1];
+    struct buffer_head **bhs;
     int curbh, curpage;
 
     if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
     /* Because zlib is not thread-safe, do all the I/O at the top. */
     blocknum = block_start >> bufshift;
-    memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+    bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+    if (!bhs) {
+        *errp = -ENOMEM;
+        return 0;
+    }
     haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
     ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
@@ -190,6 +195,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 b_eio:
     for (i = 0; i < haveblocks; i++)
         brelse(bhs[i]);
+    kfree(bhs);
     return stream.total_out;
 }
@@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
     unsigned int zisofs_pages_per_cblock =
         PAGE_SHIFT <= zisofs_block_shift ?
         (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-    struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+    struct page **pages;
     pgoff_t index = page->index, end_index;
 
     end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
         full_page = 0;
         pcount = 1;
     }
+    pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+                    sizeof(*pages), GFP_KERNEL);
+    if (!pages) {
+        unlock_page(page);
+        return -ENOMEM;
+    }
     pages[full_page] = page;
 
     for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
     }
 
     /* At this point, err contains 0 or -EIO depending on the "critical" page */
+    kfree(pages);
     return err;
 }
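Both hunks above replace on-stack variable-length arrays with heap allocations. Kernel stacks are only a few pages, so arrays sized from on-disk data are a stack-overflow risk, and removing VLAs was also a prerequisite for building the kernel with -Wvla. The same transformation in plain C, as a userspace sketch with illustrative names:

    #include <stdlib.h>

    struct buf;

    /* Before: 'struct buf *bhs[needblocks + 1];' plus a memset().
     * After: a zeroed heap array with an explicit failure path.
     * calloc(), like the kernel's kcalloc(), zeroes the memory and
     * checks the count * size multiplication for overflow. */
    static struct buf **alloc_buf_array(size_t needblocks, int *errp)
    {
        struct buf **bhs = calloc(needblocks + 1, sizeof(*bhs));

        if (!bhs)
            *errp = -1;    /* caller must bail out without using bhs */
        return bhs;
    }

The caller now also has to free the array on every exit path, which is why the b_eio cleanup label and the end of zisofs_readpage() each gain a kfree().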
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
@@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt)
             break;
 #ifdef CONFIG_JOLIET
         case Opt_iocharset:
+            kfree(popt->iocharset);
             popt->iocharset = match_strdup(&args[0]);
+            if (!popt->iocharset)
+                return 0;
             break;
 #endif
         case Opt_map_a:
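Mount options may be repeated in a single option string, and the old Opt_iocharset case simply overwrote popt->iocharset with a fresh match_strdup() result, leaking the string duplicated for the earlier occurrence; it also never checked the duplication for failure. The leak pattern in miniature, as a userspace sketch with illustrative names:

    #include <stdlib.h>
    #include <string.h>

    struct opts {
        char *iocharset;
    };

    /* Called once per "iocharset=..." occurrence in the option string. */
    static int set_iocharset(struct opts *o, const char *value)
    {
        free(o->iocharset);            /* drop any earlier occurrence */
        o->iocharset = strdup(value);
        return o->iocharset ? 0 : -1;  /* fail the parse on allocation error */
    }

(parse_options() reports failure by returning 0, hence the patch's "return 0" rather than an errno.)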
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                        u32 event_mask,
                        const void *data, int data_type)
 {
-    __u32 marks_mask, marks_ignored_mask;
+    __u32 marks_mask = 0, marks_ignored_mask = 0;
     const struct path *path = data;
 
     pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
         !d_can_lookup(path->dentry))
         return false;
 
-    if (inode_mark && vfsmnt_mark) {
-        marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
-        marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
-    } else if (inode_mark) {
-        /*
-         * if the event is for a child and this inode doesn't care about
-         * events on the child, don't send it!
-         */
-        if ((event_mask & FS_EVENT_ON_CHILD) &&
-            !(inode_mark->mask & FS_EVENT_ON_CHILD))
-            return false;
-        marks_mask = inode_mark->mask;
-        marks_ignored_mask = inode_mark->ignored_mask;
-    } else if (vfsmnt_mark) {
-        marks_mask = vfsmnt_mark->mask;
-        marks_ignored_mask = vfsmnt_mark->ignored_mask;
-    } else {
-        BUG();
+    /*
+     * If the event is for a child and this inode doesn't care about
+     * events on the child, don't send it!
+     */
+    if (inode_mark &&
+        (!(event_mask & FS_EVENT_ON_CHILD) ||
+         (inode_mark->mask & FS_EVENT_ON_CHILD))) {
+        marks_mask |= inode_mark->mask;
+        marks_ignored_mask |= inode_mark->ignored_mask;
+    }
+
+    if (vfsmnt_mark) {
+        marks_mask |= vfsmnt_mark->mask;
+        marks_ignored_mask |= vfsmnt_mark->ignored_mask;
     }
 
     if (d_is_dir(path->dentry) &&
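The fix collapses a four-way if/else chain into two independent accumulations into masks initialised to zero. The behavioural change is the child-event gate: previously, when both an inode mark and a mount mark existed, the inode mark's bits were merged unconditionally, so an event on a child could be reported through an inode mark that never requested FS_EVENT_ON_CHILD. Now the inode mark contributes only when the event is not on a child, or the mark explicitly opted into child events. Reduced to a predicate (illustrative helper, not part of the patch; the FS_EVENT_ON_CHILD value matches include/linux/fsnotify_backend.h):

    #include <stdbool.h>

    #define FS_EVENT_ON_CHILD 0x08000000u

    /* An inode mark's bits count toward the event only if the event is
     * not on a child, or the mark explicitly asked for child events. */
    static bool inode_mark_applies(unsigned int event_mask,
                                   unsigned int inode_mark_mask)
    {
        return !(event_mask & FS_EVENT_ON_CHILD) ||
               (inode_mark_mask & FS_EVENT_ON_CHILD);
    }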
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
@@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell,
               struct fsnotify_iter_info *iter_info)
 {
     struct fsnotify_group *group = NULL;
-    __u32 inode_test_mask = 0;
-    __u32 vfsmount_test_mask = 0;
+    __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
+    __u32 marks_mask = 0;
+    __u32 marks_ignored_mask = 0;
 
     if (unlikely(!inode_mark && !vfsmount_mark)) {
         BUG();
@@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell,
     /* does the inode mark tell us to do something? */
     if (inode_mark) {
         group = inode_mark->group;
-        inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
-        inode_test_mask &= inode_mark->mask;
-        inode_test_mask &= ~inode_mark->ignored_mask;
+        marks_mask |= inode_mark->mask;
+        marks_ignored_mask |= inode_mark->ignored_mask;
     }
 
     /* does the vfsmount_mark tell us to do something? */
     if (vfsmount_mark) {
-        vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
         group = vfsmount_mark->group;
-        vfsmount_test_mask &= vfsmount_mark->mask;
-        vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
-        if (inode_mark)
-            vfsmount_test_mask &= ~inode_mark->ignored_mask;
+        marks_mask |= vfsmount_mark->mask;
+        marks_ignored_mask |= vfsmount_mark->ignored_mask;
     }
 
     pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
-         " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
+         " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x"
          " data=%p data_is=%d cookie=%d\n",
-         __func__, group, to_tell, mask, inode_mark,
-         inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
+         __func__, group, to_tell, mask, inode_mark, vfsmount_mark,
+         marks_mask, marks_ignored_mask, data,
          data_is, cookie);
 
-    if (!inode_test_mask && !vfsmount_test_mask)
+    if (!(test_mask & marks_mask & ~marks_ignored_mask))
         return 0;
 
     return group->ops->handle_event(group, to_tell, inode_mark,
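send_to_group() now uses the same accumulator scheme: an event is delivered only if some requested bit survives both ignore masks. The old code was asymmetric; it masked the vfsmount test bits with the inode mark's ignored_mask but never masked the inode test bits with the vfsmount mark's ignored_mask. Folding everything into one expression closes that gap. The final test in isolation (illustrative helper, not part of the patch):

    /* Deliver iff at least one requested, non-ignored event bit remains. */
    static int should_deliver(unsigned int test_mask,
                              unsigned int marks_mask,
                              unsigned int marks_ignored_mask)
    {
        return (test_mask & marks_mask & ~marks_ignored_mask) != 0;
    }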
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
@@ -2966,7 +2966,7 @@ static int __init dquot_init(void)
             NULL);
 
     order = 0;
-    dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
+    dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
     if (!dquot_hash)
         panic("Cannot create dquot hash table");
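dquot_init() is an __init function running in ordinary process context, where sleeping is allowed, so GFP_ATOMIC was needlessly strict: it forbids the allocator from sleeping to reclaim memory and therefore fails more easily. GFP_KERNEL is the right default whenever the caller may sleep. A sketch of the rule of thumb (kernel-style fragment, not tied to this file):

    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /*
     * Process context (init calls, syscalls, workqueues) may sleep, so
     * GFP_KERNEL is appropriate and lets the allocator reclaim memory.
     * GFP_ATOMIC is reserved for contexts that must not sleep, such as
     * interrupt handlers or sections holding a spinlock, and it fails
     * more readily under memory pressure.
     */
    static int __init example_init(void)
    {
        void *table = kzalloc(1024, GFP_KERNEL);

        if (!table)
            return -ENOMEM;
        kfree(table);
        return 0;
    }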
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
@@ -28,6 +28,9 @@
 
 #include "udf_sb.h"
 
+#define SURROGATE_MASK 0xfffff800
+#define SURROGATE_PAIR 0x0000d800
+
 static int udf_uni2char_utf8(wchar_t uni,
                  unsigned char *out,
                  int boundlen)
@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni,
     if (boundlen <= 0)
         return -ENAMETOOLONG;
 
+    if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
+        return -EINVAL;
+
     if (uni < 0x80) {
         out[u_len++] = (unsigned char)uni;
     } else if (uni < 0x800) {
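UTF-16 reserves code points U+D800 through U+DFFF for surrogate pairs; they are not characters, and letting them leak into UTF-8 output produces invalid strings. The patch rejects them with a mask comparison: (uni & 0xFFFFF800) == 0xD800 ignores the low 11 bits, so it matches exactly the 2048-code-point surrogate block. A standalone check of that identity:

    #include <assert.h>

    #define SURROGATE_MASK 0xfffff800u
    #define SURROGATE_PAIR 0x0000d800u

    /* Verify the mask test matches precisely the range 0xD800..0xDFFF. */
    int main(void)
    {
        for (unsigned int c = 0; c <= 0x10000; c++) {
            int by_mask  = (c & SURROGATE_MASK) == SURROGATE_PAIR;
            int by_range = c >= 0xd800 && c <= 0xdfff;

            assert(by_mask == by_range);
        }
        return 0;
    }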
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
@@ -248,7 +248,7 @@ struct fsnotify_mark {
     /* Group this mark is for. Set on mark creation, stable until last ref
      * is dropped */
     struct fsnotify_group *group;
-    /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+    /* List of marks by group->marks_list. Also reused for queueing
      * mark into destroy_list when it's waiting for the end of SRCU period
      * before it can be freed. [group->mark_mutex] */
     struct list_head g_list;