Commit 769ec6e5 authored by Jaegeuk Kim

f2fs: call radix_tree_preload before radix_tree_insert

This patch tries to fix:

 BUG: using smp_processor_id() in preemptible [00000000] code: f2fs_gc-254:0/384
  (radix_tree_node_alloc+0x14/0x74) from [<c033d8a0>] (radix_tree_insert+0x110/0x200)
  (radix_tree_insert+0x110/0x200) from [<c02e8264>] (gc_data_segment+0x340/0x52c)
  (gc_data_segment+0x340/0x52c) from [<c02e8658>] (f2fs_gc+0x208/0x400)
  (f2fs_gc+0x208/0x400) from [<c02e8a98>] (gc_thread_func+0x248/0x28c)
  (gc_thread_func+0x248/0x28c) from [<c0139944>] (kthread+0xa0/0xac)
  (kthread+0xa0/0xac) from [<c0105ef8>] (ret_from_fork+0x14/0x3c)

The reason is that f2fs calls radix_tree_insert while preemption is enabled,
so we need to call radix_tree_preload beforehand.

Otherwise, we would have to use __GFP_WAIT for the radix tree and protect the
radix tree operations with a mutex or semaphore.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Parent 8b26ef98
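For context: when a radix tree is initialized with GFP_ATOMIC, radix_tree_insert() ends up taking nodes from the per-CPU preload pool, and touching that pool with preemption enabled is what trips the smp_processor_id() check in the trace above. The usual fix is the preload pattern sketched below; this is an illustration only, not code from the patch, and the root, lock, index and item names are placeholders.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static int example_insert(struct radix_tree_root *root, spinlock_t *lock,
			  unsigned long index, void *item)
{
	int err;

	/*
	 * May sleep to refill the per-CPU node pool; on success it returns
	 * with preemption disabled.
	 */
	if (radix_tree_preload(GFP_NOFS))
		return -ENOMEM;

	spin_lock(lock);
	err = radix_tree_insert(root, index, item);	/* consumes preloaded nodes */
	spin_unlock(lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return err;
}

Once radix_tree_preload() has succeeded, radix_tree_preload_end() must run on every exit path, which is why the hunks below add it before each early return or retry.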
@@ -304,6 +304,11 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 	struct inode_management *im = &sbi->im[type];
 	struct ino_entry *e;
 retry:
+	if (radix_tree_preload(GFP_NOFS)) {
+		cond_resched();
+		goto retry;
+	}
+
 	spin_lock(&im->ino_lock);
 
 	e = radix_tree_lookup(&im->ino_root, ino);
@@ -311,11 +316,13 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
 		if (!e) {
 			spin_unlock(&im->ino_lock);
+			radix_tree_preload_end();
 			goto retry;
 		}
 		if (radix_tree_insert(&im->ino_root, ino, e)) {
 			spin_unlock(&im->ino_lock);
 			kmem_cache_free(ino_entry_slab, e);
+			radix_tree_preload_end();
 			goto retry;
 		}
 		memset(e, 0, sizeof(struct ino_entry));
@@ -326,6 +333,7 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 			im->ino_num++;
 	}
 	spin_unlock(&im->ino_lock);
+	radix_tree_preload_end();
 }
 
 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
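In __add_ino_entry() above, a preload failure is handled by yielding the CPU and retrying rather than giving up, since the entry must end up in the tree. A condensed sketch of that idiom follows; it is an illustration only, not the full function.

#include <linux/radix-tree.h>
#include <linux/sched.h>
#include <linux/gfp.h>

/*
 * Illustration only: keep retrying the preload until it succeeds, as
 * __add_ino_entry() does above.  In the real function the tree is
 * re-checked under the spinlock after each retry, and the preload is
 * paired with radix_tree_preload_end() on every exit path.
 */
static void example_preload_retry(void)
{
	while (radix_tree_preload(GFP_NOFS))
		cond_resched();		/* allocation failed: yield, then try again */

	/* ... take the spinlock and insert here ... */

	radix_tree_preload_end();	/* re-enable preemption */
}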
@@ -351,7 +351,6 @@ static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 {
 	struct inode_entry *new_ie;
-	int ret;
 
 	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
 		iput(inode);
@@ -361,8 +360,7 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
 	new_ie->inode = inode;
 
-	ret = radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
-	if (ret) {
+	if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
 		kmem_cache_free(winode_slab, new_ie);
 		goto retry;
 	}
@@ -703,7 +701,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
 	struct cp_control cpc;
 	struct gc_inode_list gc_list = {
 		.ilist = LIST_HEAD_INIT(gc_list.ilist),
-		.iroot = RADIX_TREE_INIT(GFP_ATOMIC),
+		.iroot = RADIX_TREE_INIT(GFP_NOFS),
 	};
 
 	cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
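The gc_list tree takes the other route: add_gc_inode() is called from the GC path in process context with no spinlock held, so instead of preloading, the tree's mask is switched from GFP_ATOMIC to GFP_NOFS and radix_tree_insert() is left to allocate its nodes directly in that sleepable context. A minimal sketch of this case, with placeholder names:

#include <linux/radix-tree.h>
#include <linux/gfp.h>

/* Tree that is only ever touched in sleepable process context. */
static RADIX_TREE(example_iroot, GFP_NOFS);

static int example_track(unsigned long ino, void *entry)
{
	/*
	 * No spinlock held: the insert may allocate nodes with GFP_NOFS,
	 * so no radix_tree_preload()/radix_tree_preload_end() is needed.
	 */
	return radix_tree_insert(&example_iroot, ino, entry);
}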
@@ -1447,15 +1447,22 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 	i->nid = nid;
 	i->state = NID_NEW;
 
+	if (radix_tree_preload(GFP_NOFS)) {
+		kmem_cache_free(free_nid_slab, i);
+		return 0;
+	}
+
 	spin_lock(&nm_i->free_nid_list_lock);
 	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
 		spin_unlock(&nm_i->free_nid_list_lock);
+		radix_tree_preload_end();
 		kmem_cache_free(free_nid_slab, i);
 		return 0;
 	}
 	list_add_tail(&i->list, &nm_i->free_nid_list);
 	nm_i->fcnt++;
 	spin_unlock(&nm_i->free_nid_list_lock);
+	radix_tree_preload_end();
 	return 1;
 }
@@ -1994,8 +2001,8 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
 	INIT_LIST_HEAD(&nm_i->free_nid_list);
-	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
-	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_ATOMIC);
+	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
+	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
 
 	mutex_init(&nm_i->build_lock);
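The NAT trees show the alternative named in the commit message: they are updated under nm_i->nat_tree_lock, a sleepable lock (an rw_semaphore as of the parent commit 8b26ef98), so giving the trees a sleeping mask (GFP_NOIO) lets radix_tree_insert() allocate its nodes itself, with no preload at all. free_nid_root, by contrast, stays GFP_ATOMIC because it is populated under the free_nid_list_lock spinlock, which is why add_free_nid() above gains the preload/preload_end pair. A minimal sketch of the semaphore-plus-sleeping-GFP variant, with placeholder names:

#include <linux/radix-tree.h>
#include <linux/rwsem.h>
#include <linux/gfp.h>

static DECLARE_RWSEM(example_lock);		/* sleepable lock, not a spinlock */
static RADIX_TREE(example_root, GFP_NOIO);	/* tree may sleep to allocate nodes */

static int example_insert(unsigned long index, void *item)
{
	int err;

	down_write(&example_lock);
	err = radix_tree_insert(&example_root, index, item);	/* may allocate with GFP_NOIO */
	up_write(&example_lock);
	return err;
}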