Commit 9be32d72 authored by Jaegeuk Kim

f2fs: do retry operations with cond_resched

This patch revisits the retry paths in f2fs.
The basic idea is to use cond_resched instead of retrying from the very early
stage.
Suggested-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Parent 769ec6e5
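In concrete terms, the patch replaces loops of the shape "allocate, try to insert, free everything and restart from the top on failure" with "allocate once, retry only the failing step, and yield the CPU between attempts". A minimal sketch of the two shapes (the identifiers my_entry, my_slab, my_root and my_index are invented for illustration; this is not a literal excerpt from the patch):

	/* Before: a transient radix_tree_insert() failure (-ENOMEM) frees
	 * the new object and jumps back to redo the allocation as well. */
	static void insert_old_style(unsigned long my_index)
	{
		struct my_entry *entry;
	retry:
		entry = f2fs_kmem_cache_alloc(my_slab, GFP_NOFS);
		if (radix_tree_insert(&my_root, my_index, entry)) {
			kmem_cache_free(my_slab, entry);
			goto retry;
		}
	}

	/* After: allocate once, retry only the insertion, and call
	 * cond_resched() between attempts so other work (e.g. memory
	 * reclaim) gets a chance to run and clear the transient failure. */
	static void insert_new_style(unsigned long my_index)
	{
		struct my_entry *entry;

		entry = f2fs_kmem_cache_alloc(my_slab, GFP_NOFS);
		while (radix_tree_insert(&my_root, my_index, entry))
			cond_resched();
	}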
fs/f2fs/f2fs.h
@@ -1021,6 +1021,13 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
 	return entry;
 }
 
+static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
+				unsigned long index, void *item)
+{
+	while (radix_tree_insert(root, index, item))
+		cond_resched();
+}
+
 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
 
 static inline bool IS_INODE(struct page *page)
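Two contracts are worth spelling out here, since the new helper silently relies on them. First, f2fs_kmem_cache_alloc() (whose tail is visible in the context above) never returns NULL: it retries kmem_cache_alloc() with cond_resched() until it succeeds, which is why the converted call sites below can drop their NULL checks. Second, f2fs_radix_tree_insert() loops on any non-zero return, so it only terminates when the failure is transient (-ENOMEM); a caller must guarantee the index is not already present, or -EEXIST would spin forever. Every converted call site ensures this by doing the lookup and the insert under one writer lock, roughly (a sketch using identifiers from the node.c hunks below):

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		/* grab_nat_entry() inserts via f2fs_radix_tree_insert();
		 * no other writer can race in and cause -EEXIST while the
		 * lock is held. */
		e = grab_nat_entry(nm_i, nid);
	}
	up_write(&nm_i->nat_tree_lock);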
fs/f2fs/gc.c
@@ -356,12 +356,11 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 		iput(inode);
 		return;
 	}
-retry:
 	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
 	new_ie->inode = inode;
-
+retry:
 	if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
-		kmem_cache_free(winode_slab, new_ie);
+		cond_resched();
 		goto retry;
 	}
 	list_add_tail(&new_ie->list, &gc_list->ilist);
fs/f2fs/node.c
@@ -147,7 +147,7 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 
 	if (get_nat_flag(ne, IS_DIRTY))
 		return;
-retry:
+
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
@@ -156,11 +156,7 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 		INIT_LIST_HEAD(&head->set_list);
 		head->set = set;
 		head->entry_cnt = 0;
-
-		if (radix_tree_insert(&nm_i->nat_set_root, set, head)) {
-			kmem_cache_free(nat_entry_set_slab, head);
-			goto retry;
-		}
+		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
 	}
 	list_move_tail(&ne->list, &head->entry_list);
 	nm_i->dirty_nat_cnt++;
@@ -238,13 +234,8 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 {
 	struct nat_entry *new;
 
-	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
-	if (!new)
-		return NULL;
-	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
-		kmem_cache_free(nat_entry_slab, new);
-		return NULL;
-	}
+	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
 	nat_reset_flag(new);
@@ -257,15 +248,11 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
 						struct f2fs_nat_entry *ne)
 {
 	struct nat_entry *e;
-retry:
+
 	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, nid);
-		if (!e) {
-			up_write(&nm_i->nat_tree_lock);
-			goto retry;
-		}
 		node_info_from_raw_nat(&e->ni, ne);
 	}
 	up_write(&nm_i->nat_tree_lock);
@@ -276,15 +263,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
-retry:
+
 	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, ni->nid);
-		if (!e) {
-			up_write(&nm_i->nat_tree_lock);
-			goto retry;
-		}
 		e->ni = *ni;
 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
@@ -1833,19 +1816,13 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
 
 		raw_ne = nat_in_journal(sum, i);
-retry:
+
 		down_write(&nm_i->nat_tree_lock);
 		ne = __lookup_nat_cache(nm_i, nid);
-		if (ne)
-			goto found;
-
-		ne = grab_nat_entry(nm_i, nid);
 		if (!ne) {
-			up_write(&nm_i->nat_tree_lock);
-			goto retry;
+			ne = grab_nat_entry(nm_i, nid);
+			node_info_from_raw_nat(&ne->ni, &raw_ne);
 		}
-		node_info_from_raw_nat(&ne->ni, &raw_ne);
-found:
 		__set_nat_cache_dirty(nm_i, ne);
 		up_write(&nm_i->nat_tree_lock);
 	}
fs/f2fs/segment.c
@@ -179,13 +179,13 @@ void register_inmem_page(struct inode *inode, struct page *page)
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct inmem_pages *new;
 	int err;
-retry:
+
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
 	/* add atomic page indices to the list */
 	new->page = page;
 	INIT_LIST_HEAD(&new->list);
-
+retry:
 	/* increase reference count with clean state */
 	mutex_lock(&fi->inmem_lock);
 	err = radix_tree_insert(&fi->inmem_root, page->index, new);
@@ -195,7 +195,6 @@ void register_inmem_page(struct inode *inode, struct page *page)
 		return;
 	} else if (err) {
 		mutex_unlock(&fi->inmem_lock);
-		kmem_cache_free(inmem_entry_slab, new);
 		goto retry;
 	}
 	get_page(page);
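A subtlety in this last file: register_inmem_page() keeps its open-coded loop instead of switching to f2fs_radix_tree_insert(), because -EEXIST is a legitimate outcome there (the page was registered earlier) and must free the new entry and bail out rather than retry. Moving the retry: label below the allocation and dropping the kmem_cache_free() from the error path means a transient failure now repeats only the locked insertion, not the allocation; unlike the other sites, no explicit cond_resched() is added, since the retried mutex_lock() is already a point where the task may sleep. Reconstructed from the visible context (the -EEXIST branch sits just above the second hunk), the decision logic after the patch reads roughly:

	err = radix_tree_insert(&fi->inmem_root, page->index, new);
	if (err == -EEXIST) {
		/* already registered: drop the duplicate entry and stop */
		mutex_unlock(&fi->inmem_lock);
		kmem_cache_free(inmem_entry_slab, new);
		return;
	} else if (err) {
		/* transient failure: keep the entry and retry the insert */
		mutex_unlock(&fi->inmem_lock);
		goto retry;
	}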