提交 cd1e5afe 编写于 作者: M Matthew Wilcox (Oracle)

iomap: Pass the iomap_page into iomap_set_range_uptodate

All but one caller already has the iomap_page, so we can avoid getting
it again.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
上级 8306a5f5
...@@ -134,11 +134,9 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, ...@@ -134,11 +134,9 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
*lenp = plen; *lenp = plen;
} }
static void static void iomap_iop_set_range_uptodate(struct page *page,
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len) struct iomap_page *iop, unsigned off, unsigned len)
{ {
struct folio *folio = page_folio(page);
struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = page->mapping->host; struct inode *inode = page->mapping->host;
unsigned first = off >> inode->i_blkbits; unsigned first = off >> inode->i_blkbits;
unsigned last = (off + len - 1) >> inode->i_blkbits; unsigned last = (off + len - 1) >> inode->i_blkbits;
...@@ -151,14 +149,14 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len) ...@@ -151,14 +149,14 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
spin_unlock_irqrestore(&iop->uptodate_lock, flags); spin_unlock_irqrestore(&iop->uptodate_lock, flags);
} }
static void static void iomap_set_range_uptodate(struct page *page,
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) struct iomap_page *iop, unsigned off, unsigned len)
{ {
if (PageError(page)) if (PageError(page))
return; return;
if (page_has_private(page)) if (iop)
iomap_iop_set_range_uptodate(page, off, len); iomap_iop_set_range_uptodate(page, iop, off, len);
else else
SetPageUptodate(page); SetPageUptodate(page);
} }
...@@ -174,7 +172,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error) ...@@ -174,7 +172,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
ClearPageUptodate(page); ClearPageUptodate(page);
SetPageError(page); SetPageError(page);
} else { } else {
iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); iomap_set_range_uptodate(page, iop, bvec->bv_offset,
bvec->bv_len);
} }
if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending)) if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
...@@ -213,6 +212,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter, ...@@ -213,6 +212,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
struct page *page) struct page *page)
{ {
struct folio *folio = page_folio(page); struct folio *folio = page_folio(page);
struct iomap_page *iop;
const struct iomap *iomap = iomap_iter_srcmap(iter); const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset; size_t size = i_size_read(iter->inode) - iomap->offset;
size_t poff = offset_in_page(iomap->offset); size_t poff = offset_in_page(iomap->offset);
...@@ -229,13 +229,15 @@ static int iomap_read_inline_data(const struct iomap_iter *iter, ...@@ -229,13 +229,15 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
if (WARN_ON_ONCE(size > iomap->length)) if (WARN_ON_ONCE(size > iomap->length))
return -EIO; return -EIO;
if (poff > 0) if (poff > 0)
iomap_page_create(iter->inode, folio); iop = iomap_page_create(iter->inode, folio);
else
iop = to_iomap_page(folio);
addr = kmap_local_page(page) + poff; addr = kmap_local_page(page) + poff;
memcpy(addr, iomap->inline_data, size); memcpy(addr, iomap->inline_data, size);
memset(addr + size, 0, PAGE_SIZE - poff - size); memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr); kunmap_local(addr);
iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff); iomap_set_range_uptodate(page, iop, poff, PAGE_SIZE - poff);
return 0; return 0;
} }
...@@ -273,7 +275,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, ...@@ -273,7 +275,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
if (iomap_block_needs_zeroing(iter, pos)) { if (iomap_block_needs_zeroing(iter, pos)) {
zero_user(page, poff, plen); zero_user(page, poff, plen);
iomap_set_range_uptodate(page, poff, plen); iomap_set_range_uptodate(page, iop, poff, plen);
goto done; goto done;
} }
...@@ -589,7 +591,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, ...@@ -589,7 +591,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (status) if (status)
return status; return status;
} }
iomap_set_range_uptodate(page, poff, plen); iomap_set_range_uptodate(page, iop, poff, plen);
} while ((block_start += plen) < block_end); } while ((block_start += plen) < block_end);
return 0; return 0;
...@@ -661,6 +663,8 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos, ...@@ -661,6 +663,8 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct page *page) size_t copied, struct page *page)
{ {
struct folio *folio = page_folio(page);
struct iomap_page *iop = to_iomap_page(folio);
flush_dcache_page(page); flush_dcache_page(page);
/* /*
...@@ -676,7 +680,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, ...@@ -676,7 +680,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
*/ */
if (unlikely(copied < len && !PageUptodate(page))) if (unlikely(copied < len && !PageUptodate(page)))
return 0; return 0;
iomap_set_range_uptodate(page, offset_in_page(pos), len); iomap_set_range_uptodate(page, iop, offset_in_page(pos), len);
__set_page_dirty_nobuffers(page); __set_page_dirty_nobuffers(page);
return copied; return copied;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册