Commit 1ff7bd3b authored by Jaegeuk Kim

f2fs: introduce a bio array for per-page write bios

f2fs has three bio types, NODE, DATA, and META, and it manages several data
structures for each bio type.

The code is a little messy, so this patch introduces a bio info array
that groups the individual data structures as follows.

struct f2fs_bio_info {
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct mutex io_mutex;		/* mutex for bio */
};

struct f2fs_sb_info {
	...
	struct f2fs_bio_info write_io[NR_PAGE_TYPE];	/* for write bios */
	...
};

The code changes that follow from this new data structure are trivial.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Parent c11abd1a
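
As a rough illustration of why the changes are trivial: the patch replaces three parallel per-type arrays with one array of structs, and every call site changes mechanically. The sketch below is user-space C with simplified stand-in types (void pointers, pthread mutexes, and an abbreviated page_type enum), not the real kernel definitions:

	#include <pthread.h>

	/* Abbreviated stand-in for f2fs's enum page_type. */
	enum page_type { DATA, NODE, META, NR_PAGE_TYPE };

	/* Before: three parallel arrays, one slot per page type. */
	struct sb_before {
		void            *bio[NR_PAGE_TYPE];               /* bios to merge */
		long long        last_block_in_bio[NR_PAGE_TYPE]; /* last block number */
		pthread_mutex_t  write_mutex[NR_PAGE_TYPE];       /* mutex for writing IOs */
	};

	/* After: the same per-type state grouped into one struct. */
	struct bio_info {
		void            *bio;
		long long        last_block_in_bio;
		pthread_mutex_t  io_mutex;
	};

	struct sb_after {
		struct bio_info  write_io[NR_PAGE_TYPE];          /* for write bios */
	};

	/* A lookup like this ... */
	static void *pending_bio_before(struct sb_before *sbi, enum page_type type)
	{
		return sbi->bio[type];
	}

	/* ... becomes an access through the grouped struct. */
	static void *pending_bio_after(struct sb_after *sbi, enum page_type type)
	{
		return sbi->write_io[type].bio;
	}

Only the shape of the lookup changes; the per-type granularity and the locking stay exactly the same.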
@@ -361,6 +361,12 @@ enum page_type {
 	META_FLUSH,
 };
 
+struct f2fs_bio_info {
+	struct bio *bio;		/* bios to merge */
+	sector_t last_block_in_bio;	/* last block number */
+	struct mutex io_mutex;		/* mutex for bio */
+};
+
 struct f2fs_sb_info {
 	struct super_block *sb;			/* pointer to VFS super block */
 	struct proc_dir_entry *s_proc;		/* proc entry */
@@ -374,9 +380,9 @@ struct f2fs_sb_info {
 
 	/* for segment-related operations */
 	struct f2fs_sm_info *sm_info;		/* segment manager */
-	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
-	sector_t last_block_in_bio[NR_PAGE_TYPE];/* last block number */
-	struct mutex write_mutex[NR_PAGE_TYPE];	/* mutex for writing IOs */
+
+	/* for bio operations */
+	struct f2fs_bio_info write_io[NR_PAGE_TYPE];	/* for write bios */
 
 	/* for checkpoint */
 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
...
@@ -836,65 +836,65 @@ static void do_submit_bio(struct f2fs_sb_info *sbi,
 {
 	int rw = sync ? WRITE_SYNC : WRITE;
 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
-	struct bio *bio = sbi->bio[btype];
+	struct f2fs_bio_info *io = &sbi->write_io[btype];
 	struct bio_private *p;
 
-	if (!bio)
+	if (!io->bio)
 		return;
 
-	sbi->bio[btype] = NULL;
-
 	if (type >= META_FLUSH)
 		rw = WRITE_FLUSH_FUA;
 
 	if (btype == META)
 		rw |= REQ_META;
 
-	p = bio->bi_private;
+	p = io->bio->bi_private;
 	p->sbi = sbi;
-	bio->bi_end_io = f2fs_end_io_write;
+	io->bio->bi_end_io = f2fs_end_io_write;
 
-	trace_f2fs_do_submit_bio(sbi->sb, btype, sync, bio);
+	trace_f2fs_do_submit_bio(sbi->sb, btype, sync, io->bio);
 
 	if (type == META_FLUSH) {
 		DECLARE_COMPLETION_ONSTACK(wait);
 		p->is_sync = true;
 		p->wait = &wait;
-		submit_bio(rw, bio);
+		submit_bio(rw, io->bio);
 		wait_for_completion(&wait);
 	} else {
 		p->is_sync = false;
-		submit_bio(rw, bio);
+		submit_bio(rw, io->bio);
 	}
+	io->bio = NULL;
 }
 
 void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
 {
-	enum page_type btype = PAGE_TYPE_OF_BIO(type);
+	struct f2fs_bio_info *io = &sbi->write_io[PAGE_TYPE_OF_BIO(type)];
 
-	if (!sbi->bio[btype])
+	if (!io->bio)
 		return;
 
-	mutex_lock(&sbi->write_mutex[btype]);
+	mutex_lock(&io->io_mutex);
 	do_submit_bio(sbi, type, sync);
-	mutex_unlock(&sbi->write_mutex[btype]);
+	mutex_unlock(&io->io_mutex);
 }
 
 static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
 				block_t blk_addr, enum page_type type)
 {
 	struct block_device *bdev = sbi->sb->s_bdev;
+	struct f2fs_bio_info *io = &sbi->write_io[type];
 	int bio_blocks;
 
 	verify_block_addr(sbi, blk_addr);
 
-	mutex_lock(&sbi->write_mutex[type]);
+	mutex_lock(&io->io_mutex);
 
 	inc_page_count(sbi, F2FS_WRITEBACK);
 
-	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
+	if (io->bio && io->last_block_in_bio != blk_addr - 1)
 		do_submit_bio(sbi, type, false);
 alloc_new:
-	if (sbi->bio[type] == NULL) {
+	if (io->bio == NULL) {
 		struct bio_private *priv;
 retry:
 		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
@@ -904,9 +904,9 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
 		}
 
 		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
-		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
-		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-		sbi->bio[type]->bi_private = priv;
+		io->bio = f2fs_bio_alloc(bdev, bio_blocks);
+		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+		io->bio->bi_private = priv;
 		/*
 		 * The end_io will be assigned at the sumbission phase.
 		 * Until then, let bio_add_page() merge consecutive IOs as much
@@ -914,15 +914,15 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
 		 */
 	}
 
-	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
+	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
 							PAGE_CACHE_SIZE) {
 		do_submit_bio(sbi, type, false);
 		goto alloc_new;
 	}
 
-	sbi->last_block_in_bio[type] = blk_addr;
+	io->last_block_in_bio = blk_addr;
 
-	mutex_unlock(&sbi->write_mutex[type]);
+	mutex_unlock(&io->io_mutex);
 	trace_f2fs_submit_write_page(page, blk_addr, type);
 }
...
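
The do_submit_bio()/submit_write_page() hunks above only relocate the per-type state; the merge-or-submit pattern itself is unchanged. A condensed user-space sketch of that pattern, using hypothetical stub types and a fake submit step in place of the real bio and submit_bio() machinery:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct bio { long long start_block; int nr_pages; };

	struct bio_info {
		struct bio      *bio;                /* in-flight bio being merged */
		long long        last_block_in_bio;  /* last block added to it */
		pthread_mutex_t  io_mutex;           /* serializes writers of one type */
	};

	/* Stand-in for do_submit_bio(): "submit" and drop the pending bio. */
	static void do_submit(struct bio_info *io)
	{
		if (!io->bio)
			return;
		printf("submit %d page(s) starting at block %lld\n",
		       io->bio->nr_pages, io->bio->start_block);
		free(io->bio);
		io->bio = NULL;
	}

	/* Stand-in for submit_write_page(): merge contiguous blocks, flush on a gap. */
	static void write_page(struct bio_info *io, long long blk_addr)
	{
		pthread_mutex_lock(&io->io_mutex);

		/* A non-contiguous write forces the pending bio out first. */
		if (io->bio && io->last_block_in_bio != blk_addr - 1)
			do_submit(io);

		if (!io->bio) {
			io->bio = calloc(1, sizeof(*io->bio));
			io->bio->start_block = blk_addr;
		}
		io->bio->nr_pages++;
		io->last_block_in_bio = blk_addr;

		pthread_mutex_unlock(&io->io_mutex);
	}

	int main(void)
	{
		struct bio_info io = { .io_mutex = PTHREAD_MUTEX_INITIALIZER };

		write_page(&io, 100);	/* starts a new bio */
		write_page(&io, 101);	/* contiguous, merged into it */
		write_page(&io, 500);	/* gap: previous bio is submitted */
		do_submit(&io);		/* flush whatever is left */
		return 0;
	}

In f2fs itself the same flow additionally carries the bio_private completion bookkeeping and the synchronous META_FLUSH case shown in do_submit_bio().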
@@ -879,7 +879,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	spin_lock_init(&sbi->stat_lock);
 
 	for (i = 0; i < NR_PAGE_TYPE; i++)
-		mutex_init(&sbi->write_mutex[i]);
+		mutex_init(&sbi->write_io[i].io_mutex);
 	init_rwsem(&sbi->cp_rwsem);
 	init_waitqueue_head(&sbi->cp_wait);
...