提交 3cd8a239 编写于 作者: J Jaegeuk Kim

f2fs: cleanup the f2fs_bio_alloc routine

Do more cleanup for better code readability.

- Change the parameter set of f2fs_bio_alloc()
  This function should allocate a bio only since it is not something like
  f2fs_bio_init(). Instead, the caller should initialize the allocated bio.

- Introduce SECTOR_FROM_BLOCK
  This macro translates a block address to its sector address.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
上级 457d08ee
...@@ -343,11 +343,12 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page, ...@@ -343,11 +343,12 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
down_read(&sbi->bio_sem); down_read(&sbi->bio_sem);
/* Allocate a new bio */ /* Allocate a new bio */
bio = f2fs_bio_alloc(bdev, blk_addr << (sbi->log_blocksize - 9), bio = f2fs_bio_alloc(bdev, 1);
1, GFP_NOFS | __GFP_HIGH);
/* Initialize the bio */ /* Initialize the bio */
bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
bio->bi_end_io = read_end_io; bio->bi_end_io = read_end_io;
if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
kfree(bio->bi_private); kfree(bio->bi_private);
bio_put(bio); bio_put(bio);
......
...@@ -924,7 +924,7 @@ void clear_prefree_segments(struct f2fs_sb_info *); ...@@ -924,7 +924,7 @@ void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *); int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *); void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, sector_t, int, gfp_t); struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync); void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
int write_meta_page(struct f2fs_sb_info *, struct page *, int write_meta_page(struct f2fs_sb_info *, struct page *,
struct writeback_control *); struct writeback_control *);
......
...@@ -643,23 +643,21 @@ static void f2fs_end_io_write(struct bio *bio, int err) ...@@ -643,23 +643,21 @@ static void f2fs_end_io_write(struct bio *bio, int err)
bio_put(bio); bio_put(bio);
} }
struct bio *f2fs_bio_alloc(struct block_device *bdev, sector_t first_sector, struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
int nr_vecs, gfp_t gfp_flags)
{ {
struct bio *bio; struct bio *bio;
struct bio_private *priv;
/* allocate new bio */
bio = bio_alloc(gfp_flags, nr_vecs);
bio->bi_bdev = bdev;
bio->bi_sector = first_sector;
retry: retry:
bio->bi_private = kmalloc(sizeof(struct bio_private), priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
GFP_NOFS | __GFP_HIGH); if (!priv) {
if (!bio->bi_private) {
cond_resched(); cond_resched();
goto retry; goto retry;
} }
/* No failure on bio allocation */
bio = bio_alloc(GFP_NOIO, npages);
bio->bi_bdev = bdev;
bio->bi_private = priv;
return bio; return bio;
} }
...@@ -711,10 +709,15 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page, ...@@ -711,10 +709,15 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1) if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
do_submit_bio(sbi, type, false); do_submit_bio(sbi, type, false);
alloc_new: alloc_new:
if (sbi->bio[type] == NULL) if (sbi->bio[type] == NULL) {
sbi->bio[type] = f2fs_bio_alloc(bdev, sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
blk_addr << (sbi->log_blocksize - 9), sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
bio_get_nr_vecs(bdev), GFP_NOFS | __GFP_HIGH); /*
 * The end_io will be assigned at the submission phase.
* Until then, let bio_add_page() merge consecutive IOs as much
* as possible.
*/
}
if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) < if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
PAGE_CACHE_SIZE) { PAGE_CACHE_SIZE) {
......
...@@ -82,6 +82,9 @@ ...@@ -82,6 +82,9 @@
(BITS_TO_LONGS(nr) * sizeof(unsigned long)) (BITS_TO_LONGS(nr) * sizeof(unsigned long))
#define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments) #define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments)
/*
 * Translate a filesystem block address into its starting 512-byte
 * sector address on the device.  log_blocksize is log2 of the f2fs
 * block size; F2FS_LOG_SECTOR_SIZE is log2 of the sector size (9).
 *
 * Both macro arguments are fully parenthesized so that expression
 * arguments (e.g. blk_addr + 1) expand correctly.
 */
#define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
	((blk_addr) << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
/* during checkpoint, bio_private is used to synchronize the last bio */ /* during checkpoint, bio_private is used to synchronize the last bio */
struct bio_private { struct bio_private {
struct f2fs_sb_info *sbi; struct f2fs_sb_info *sbi;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册