Commit 3b423417 authored by Chao Yu, committed by Greg Kroah-Hartman

staging: erofs: clean up erofs_map_blocks_iter

This patch cleans up the erofs_map_blocks* function and structure
family; it just simplifies the code, with no logic change.
Reviewed-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 6af7b483
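
In a nutshell (a reader's summary of the diff below, not part of the original commit message): the cached metadata page that used to travel through a separate struct page **mpage_ret parameter, wrapped together with the map in struct erofs_map_blocks_iter, now lives directly in struct erofs_map_blocks, so z_erofs_map_blocks_iter() takes one less argument and callers simply release map->mpage when they are done. A minimal sketch of the reworked interface, with the unchanged fields elided:

/* Sketch of the reworked interface (unchanged fields elided). */
struct erofs_map_blocks {
	/* ... m_pa, m_la, m_plen, m_llen, m_flags as before ... */
	struct page *mpage;	/* metadata page cached across iterations */
};

/* declared in internal.h; stubbed to -ENOTSUPP without CONFIG_EROFS_FS_ZIP */
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);

Callers such as erofs_map_blocks() and the VLE frontend keep the page in map->mpage and put_page() it afterwards, which is exactly the conversion the hunks below perform.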
@@ -165,37 +165,16 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 	return err;
 }
 
-int erofs_map_blocks_iter(struct inode *inode,
-	struct erofs_map_blocks *map,
-	struct page **mpage_ret, int flags)
-{
-	/* by default, reading raw data never use erofs_map_blocks_iter */
-	if (unlikely(!is_inode_layout_compression(inode))) {
-		if (*mpage_ret)
-			put_page(*mpage_ret);
-		*mpage_ret = NULL;
-
-		return erofs_map_blocks(inode, map, flags);
-	}
-
-#ifdef CONFIG_EROFS_FS_ZIP
-	return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
-#else
-	/* data compression is not available */
-	return -ENOTSUPP;
-#endif
-}
-
 int erofs_map_blocks(struct inode *inode,
 	struct erofs_map_blocks *map, int flags)
 {
 	if (unlikely(is_inode_layout_compression(inode))) {
-		struct page *mpage = NULL;
-		int err;
+		int err = z_erofs_map_blocks_iter(inode, map, flags);
 
-		err = erofs_map_blocks_iter(inode, map, &mpage, flags);
-		if (mpage)
-			put_page(mpage);
+		if (map->mpage) {
+			put_page(map->mpage);
+			map->mpage = NULL;
+		}
 		return err;
 	}
 	return erofs_map_blocks_flatmode(inode, map, flags);
......
@@ -461,11 +461,26 @@ struct erofs_map_blocks {
 	u64 m_plen, m_llen;
 
 	unsigned int m_flags;
+
+	struct page *mpage;
 };
 
 /* Flags used by erofs_map_blocks() */
 #define EROFS_GET_BLOCKS_RAW	0x0001
 
+#ifdef CONFIG_EROFS_FS_ZIP
+int z_erofs_map_blocks_iter(struct inode *inode,
+			    struct erofs_map_blocks *map,
+			    int flags);
+#else
+static inline int z_erofs_map_blocks_iter(struct inode *inode,
+					  struct erofs_map_blocks *map,
+					  int flags)
+{
+	return -ENOTSUPP;
+}
+#endif
+
 /* data.c */
 static inline struct bio *
 erofs_grab_bio(struct super_block *sb,
@@ -522,19 +537,6 @@ static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
 }
 
 extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
-extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
-				 struct page **, int);
-
-struct erofs_map_blocks_iter {
-	struct erofs_map_blocks map;
-	struct page *mpage;
-};
-
-#ifdef CONFIG_EROFS_FS_ZIP
-extern int z_erofs_map_blocks_iter(struct inode *,
-				   struct erofs_map_blocks *,
-				   struct page **, int);
-#endif
 
 static inline struct page *
 erofs_get_inline_page(struct inode *inode,
......
@@ -636,7 +636,7 @@ struct z_erofs_vle_frontend {
 	struct inode *const inode;
 
 	struct z_erofs_vle_work_builder builder;
-	struct erofs_map_blocks_iter m_iter;
+	struct erofs_map_blocks map;
 
 	z_erofs_vle_owned_workgrp_t owned_head;
 
@@ -647,8 +647,9 @@ struct z_erofs_vle_frontend {
 
 #define VLE_FRONTEND_INIT(__i) { \
 	.inode = __i, \
-	.m_iter = { \
-		{ .m_llen = 0, .m_plen = 0 }, \
+	.map = { \
+		.m_llen = 0, \
+		.m_plen = 0, \
 		.mpage = NULL \
 	}, \
 	.builder = VLE_WORK_BUILDER_INIT(), \
@@ -681,8 +682,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
 {
 	struct super_block *const sb = fe->inode->i_sb;
 	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
-	struct erofs_map_blocks_iter *const m = &fe->m_iter;
-	struct erofs_map_blocks *const map = &m->map;
+	struct erofs_map_blocks *const map = &fe->map;
 	struct z_erofs_vle_work_builder *const builder = &fe->builder;
 	const loff_t offset = page_offset(page);
 
@@ -715,7 +715,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
 
 	map->m_la = offset + cur;
 	map->m_llen = 0;
-	err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
+	err = z_erofs_map_blocks_iter(fe->inode, map, 0);
 	if (unlikely(err))
 		goto err_out;
 
@@ -1484,8 +1484,8 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
 	z_erofs_submit_and_unzip(&f, &pagepool, true);
 out:
-	if (f.m_iter.mpage)
-		put_page(f.m_iter.mpage);
+	if (f.map.mpage)
+		put_page(f.map.mpage);
 
 	/* clean up the remaining free pages */
 	put_pages_list(&pagepool);
@@ -1555,8 +1555,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 	z_erofs_submit_and_unzip(&f, &pagepool, sync);
 
-	if (f.m_iter.mpage)
-		put_page(f.m_iter.mpage);
+	if (f.map.mpage)
+		put_page(f.map.mpage);
 
 	/* clean up the remaining free pages */
 	put_pages_list(&pagepool);
@@ -1701,14 +1701,14 @@ vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
 
 int z_erofs_map_blocks_iter(struct inode *inode,
 	struct erofs_map_blocks *map,
-	struct page **mpage_ret, int flags)
+	int flags)
 {
 	void *kaddr;
 	const struct vle_map_blocks_iter_ctx ctx = {
 		.inode = inode,
 		.sb = inode->i_sb,
 		.clusterbits = EROFS_I_SB(inode)->clusterbits,
-		.mpage_ret = mpage_ret,
+		.mpage_ret = &map->mpage,
 		.kaddr_ret = &kaddr
 	};
 	const unsigned int clustersize = 1 << ctx.clusterbits;
@@ -1722,7 +1722,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	/* initialize `pblk' to keep gcc from printing foolish warnings */
 	erofs_blk_t mblk, pblk = 0;
-	struct page *mpage = *mpage_ret;
+	struct page *mpage = map->mpage;
 	struct z_erofs_vle_decompressed_index *di;
 	unsigned int cluster_type, logical_cluster_ofs;
 	int err = 0;
@@ -1758,7 +1758,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 			err = PTR_ERR(mpage);
 			goto out;
 		}
-		*mpage_ret = mpage;
+		map->mpage = mpage;
 	} else {
 		lock_page(mpage);
 		DBG_BUGON(!PageUptodate(mpage));
@@ -1818,7 +1818,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		/* get the correspoinding first chunk */
 		err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
 						  &pblk, &map->m_flags);
-		mpage = *mpage_ret;
+		mpage = map->mpage;
 		if (unlikely(err)) {
 			if (mpage)
......