Commit c6e08f0d authored by Zhang Yi, committed by Zheng Zengkai

reiserfs: replace ll_rw_block()

mainline inclusion
from mainline-v6.1-rc1
commit d554822e
category: bugfix
bugzilla: 187878,https://gitee.com/openeuler/kernel/issues/I5QJH9
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=v6.1-rc2&id=d554822e82cc99db53b845f3e60dc13e56ad4575

--------------------------------

ll_rw_block() is not safe for the sync read/write path because it cannot
guarantee submitting the read/write IO if the buffer has been locked. We
could get a false-positive EIO after wait_on_buffer() in the read path if
the buffer has been locked by others. So stop using ll_rw_block() in
reiserfs. We also switch to the new bh_readahead_batch() helper for the
buffer-array readahead path.
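
To make the hazard concrete, here is a minimal sketch (illustration only, not
part of the patch; read_block_old()/read_block_new() are hypothetical helpers,
and the four-argument ll_rw_block() prototype is the one still used in this
tree) contrasting the old and new synchronous read patterns:

#include <linux/buffer_head.h>
#include <linux/errno.h>

/*
 * Old pattern: ll_rw_block() only submits the read if it wins the
 * trylock on the buffer.  If another task currently holds the buffer
 * lock, no IO is submitted at all, so the buffer can still be
 * !uptodate after wait_on_buffer() even though no real IO error
 * happened.
 */
static int read_block_old(struct buffer_head *bh)
{
	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;	/* possible false EIO */
}

/*
 * New pattern: bh_read_nowait() sleeps on the buffer lock and then
 * submits the read (unless the buffer became uptodate in the
 * meantime), so a !uptodate buffer after wait_on_buffer() really
 * means the IO failed.
 */
static int read_block_new(struct buffer_head *bh)
{
	bh_read_nowait(bh, 0);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}

write_dirty_buffer(), bh_read_batch() and bh_read() used below give the same
submission guarantee at the write, batch-read and synchronous-read call sites,
while bh_readahead()/bh_readahead_batch() keep the best-effort trylock
behaviour, which is acceptable for pure readahead.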

Link: https://lkml.kernel.org/r/20220901133505.2510834-10-yi.zhang@huawei.com
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Conflict:
  fs/reiserfs/journal.c
  fs/reiserfs/stree.c
  fs/reiserfs/super.c
Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 71a7991b
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -870,7 +870,7 @@ static int write_ordered_buffers(spinlock_t * lock,
 		 */
 		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
 			spin_unlock(lock);
-			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+			write_dirty_buffer(bh, 0);
 			spin_lock(lock);
 		}
 		put_bh(bh);
@@ -1054,7 +1054,7 @@ static int flush_commit_list(struct super_block *s,
 		if (tbh) {
 			if (buffer_dirty(tbh)) {
 				depth = reiserfs_write_unlock_nested(s);
-				ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
+				write_dirty_buffer(tbh, 0);
 				reiserfs_write_lock_nested(s, depth);
 			}
 			put_bh(tbh) ;
@@ -2239,7 +2239,7 @@ static int journal_read_transaction(struct super_block *sb,
 		}
 	}
 	/* read in the log blocks, memcpy to the corresponding real block */
-	ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
+	bh_read_batch(get_desc_trans_len(desc), log_blocks);
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 
 		wait_on_buffer(log_blocks[i]);
@@ -2341,10 +2341,11 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
 		} else
 			bhlist[j++] = bh;
 	}
-	ll_rw_block(REQ_OP_READ, 0, j, bhlist);
+	bh = bhlist[0];
+	bh_read_nowait(bh, 0);
+	bh_readahead_batch(j - 1, &bhlist[1], 0);
 	for (i = 1; i < j; i++)
 		brelse(bhlist[i]);
-	bh = bhlist[0];
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -579,7 +579,7 @@ static int search_by_key_reada(struct super_block *s,
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, bh + j);
+			bh_readahead(bh[j], REQ_RAHEAD);
 		}
 		brelse(bh[j]);
 	}
@@ -685,7 +685,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
 			if (!buffer_uptodate(bh) && depth == -1)
 				depth = reiserfs_write_unlock_nested(sb);
 
-			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+			bh_read_nowait(bh, 0);
 			wait_on_buffer(bh);
 
 			if (depth != -1)
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1708,9 +1708,7 @@ static int read_super_block(struct super_block *s, int offset)
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-	ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
-	wait_on_buffer(SB_BUFFER_WITH_SB(s));
-	if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
+	if (bh_read(SB_BUFFER_WITH_SB(s), 0) < 0) {
 		reiserfs_warning(s, "reiserfs-2504", "error reading the super");
 		return 1;
 	}