Commit 9ace868a authored by Linus Torvalds

Merge tag 'iomap-5.0-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull iomap fixes from Darrick Wong:
 "A couple of iomap fixes to eliminate some memory corruption and hang
  problems that were reported:

   - fix page migration when using iomap for pagecache management

   - fix a use-after-free bug in the directio code"

* tag 'iomap-5.0-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  iomap: fix a use after free in iomap_dio_rw
  iomap: get/put the page in iomap_page_create/release()
...@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page) ...@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
atomic_set(&iop->read_count, 0); atomic_set(&iop->read_count, 0);
atomic_set(&iop->write_count, 0); atomic_set(&iop->write_count, 0);
bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
/*
* migrate_page_move_mapping() assumes that pages with private data have
* their count elevated by 1.
*/
get_page(page);
set_page_private(page, (unsigned long)iop); set_page_private(page, (unsigned long)iop);
SetPagePrivate(page); SetPagePrivate(page);
return iop; return iop;
...@@ -132,6 +138,7 @@ iomap_page_release(struct page *page) ...@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
WARN_ON_ONCE(atomic_read(&iop->write_count)); WARN_ON_ONCE(atomic_read(&iop->write_count));
ClearPagePrivate(page); ClearPagePrivate(page);
set_page_private(page, 0); set_page_private(page, 0);
put_page(page);
kfree(iop); kfree(iop);
} }
...@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage, ...@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
if (page_has_private(page)) { if (page_has_private(page)) {
ClearPagePrivate(page); ClearPagePrivate(page);
get_page(newpage);
set_page_private(newpage, page_private(page)); set_page_private(newpage, page_private(page));
set_page_private(page, 0); set_page_private(page, 0);
put_page(page);
SetPagePrivate(newpage); SetPagePrivate(newpage);
} }
...@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos = iocb->ki_pos, start = pos; loff_t pos = iocb->ki_pos, start = pos;
loff_t end = iocb->ki_pos + count - 1, ret = 0; loff_t end = iocb->ki_pos + count - 1, ret = 0;
unsigned int flags = IOMAP_DIRECT; unsigned int flags = IOMAP_DIRECT;
bool wait_for_completion = is_sync_kiocb(iocb);
struct blk_plug plug; struct blk_plug plug;
struct iomap_dio *dio; struct iomap_dio *dio;
...@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->end_io = end_io; dio->end_io = end_io;
dio->error = 0; dio->error = 0;
dio->flags = 0; dio->flags = 0;
dio->wait_for_completion = is_sync_kiocb(iocb);
dio->submit.iter = iter; dio->submit.iter = iter;
dio->submit.waiter = current; dio->submit.waiter = current;
...@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio_warn_stale_pagecache(iocb->ki_filp); dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0; ret = 0;
if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
!inode->i_sb->s_dio_done_wq) { !inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb); ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0) if (ret < 0)
...@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret <= 0) { if (ret <= 0) {
/* magic error code to fall back to buffered I/O */ /* magic error code to fall back to buffered I/O */
if (ret == -ENOTBLK) { if (ret == -ENOTBLK) {
dio->wait_for_completion = true; wait_for_completion = true;
ret = 0; ret = 0;
} }
break; break;
...@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (dio->flags & IOMAP_DIO_WRITE_FUA) if (dio->flags & IOMAP_DIO_WRITE_FUA)
dio->flags &= ~IOMAP_DIO_NEED_SYNC; dio->flags &= ~IOMAP_DIO_NEED_SYNC;
/*
* We are about to drop our additional submission reference, which
 * might be the last reference to the dio. There are three
* different ways we can progress here:
*
* (a) If this is the last reference we will always complete and free
* the dio ourselves.
* (b) If this is not the last reference, and we serve an asynchronous
* iocb, we must never touch the dio after the decrement, the
* I/O completion handler will complete and free it.
* (c) If this is not the last reference, but we serve a synchronous
* iocb, the I/O completion handler will wake us up on the drop
* of the final reference, and we will complete and free it here
* after we got woken by the I/O completion handler.
*/
dio->wait_for_completion = wait_for_completion;
if (!atomic_dec_and_test(&dio->ref)) { if (!atomic_dec_and_test(&dio->ref)) {
if (!dio->wait_for_completion) if (!wait_for_completion)
return -EIOCBQUEUED; return -EIOCBQUEUED;
for (;;) { for (;;) {
...@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
} }
ret = iomap_dio_complete(dio); return iomap_dio_complete(dio);
return ret;
out_free_dio: out_free_dio:
kfree(dio); kfree(dio);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register