Commit 9a100a44 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/arjan/linux-2.6-async-2

* git://git.kernel.org/pub/scm/linux/kernel/git/arjan/linux-2.6-async-2:
  async: make async a command line option for now
  partial revert of asynchronous inode delete
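
Both merged changes gate the new in-kernel async function-call machinery: kernel/async.c now only defers work when explicitly enabled at boot, and inode deletion goes back to running synchronously. As the kernel/async.c hunks further down show, the switch is the "fastboot" boot parameter, so enabling it is just a matter of appending that token to the kernel command line, e.g. (bootloader entry shown purely as an illustration; the kernel image and root= value are placeholders):

	linux /boot/vmlinuz root=/dev/sda1 ro quiet fastboot

Without that token, async_enabled stays 0 and every async_schedule() call falls back to executing its callback immediately, as the comment in __async_schedule() describes.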
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1139,11 +1139,16 @@ EXPORT_SYMBOL(remove_inode_hash);
  * I_FREEING is set so that no-one will take a new reference to the inode while
  * it is being deleted.
  */
-static void generic_delete_inode_async(void *data, async_cookie_t cookie)
+void generic_delete_inode(struct inode *inode)
 {
-	struct inode *inode = data;
 	const struct super_operations *op = inode->i_sb->s_op;
 
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state |= I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
+
 	security_inode_delete(inode);
 
 	if (op->delete_inode) {
@@ -1167,16 +1172,6 @@ static void generic_delete_inode_async(void *data, async_cookie_t cookie)
 	destroy_inode(inode);
 }
 
-void generic_delete_inode(struct inode *inode)
-{
-	list_del_init(&inode->i_list);
-	list_del_init(&inode->i_sb_list);
-	inode->i_state |= I_FREEING;
-	inodes_stat.nr_inodes--;
-	spin_unlock(&inode_lock);
-	async_schedule_special(generic_delete_inode_async, inode, &inode->i_sb->s_async_list);
-}
-
 EXPORT_SYMBOL(generic_delete_inode);
 
 static void generic_forget_inode(struct inode *inode)
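
One detail worth noting in both the removed and the restored version: the spin_unlock(&inode_lock) near the top of generic_delete_inode() pairs with a lock taken by its caller, not by the function itself. The final iput() acquires inode_lock when the last reference is dropped and the delete path is responsible for releasing it once the inode has been taken off the lists. A rough sketch of that caller side, assuming the fs/inode.c iput() of this era (not part of this diff):

	/*
	 * Sketch of the final-iput path, for context only; the real code lives in
	 * fs/inode.c and dispatches through s_op->drop_inode / generic_drop_inode().
	 */
	void iput(struct inode *inode)
	{
		if (inode) {
			BUG_ON(inode->i_state == I_CLEAR);

			/* takes inode_lock only when the last reference goes away ... */
			if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
				iput_final(inode);	/* ... ends up in generic_delete_inode(),
							 * which drops inode_lock itself */
		}
	}

With the revert, the work that used to run in the async worker (the security hook, the ->delete_inode() call, unhashing and the final destroy_inode()) again completes before generic_delete_inode() returns, instead of being queued on the superblock's async list.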
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -65,6 +65,8 @@ static LIST_HEAD(async_pending);
 static LIST_HEAD(async_running);
 static DEFINE_SPINLOCK(async_lock);
 
+static int async_enabled = 0;
+
 struct async_entry {
 	struct list_head list;
 	async_cookie_t cookie;
@@ -169,7 +171,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
 	 * If we're out of memory or if there's too much work
 	 * pending already, we execute synchronously.
 	 */
-	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
 		kfree(entry);
 		spin_lock_irqsave(&async_lock, flags);
 		newcookie = next_cookie++;
@@ -316,8 +318,18 @@ static int async_manager_thread(void *unused)
 
 static int __init async_init(void)
 {
-	kthread_run(async_manager_thread, NULL, "async/mgr");
+	if (async_enabled)
+		kthread_run(async_manager_thread, NULL, "async/mgr");
+
 	return 0;
 }
 
+static int __init setup_async(char *str)
+{
+	async_enabled = 1;
+	return 1;
+}
+
+__setup("fastboot", setup_async);
+
 core_initcall(async_init);
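
The caller-visible API is unchanged by this merge; only whether a call is actually deferred changes. A minimal usage sketch, assuming the include/linux/async.h interface of this tree (async_schedule() takes a callback of type void (*)(void *data, async_cookie_t cookie); async_synchronize_full() waits for all outstanding work). The driver, device type and helper below are hypothetical:

	#include <linux/async.h>
	#include <linux/init.h>

	struct my_dev { int id; };			/* hypothetical device type */
	static struct my_dev example_dev = { .id = 0 };

	/* Hypothetical slow initialisation we would like to run in the background. */
	static void my_dev_probe_async(void *data, async_cookie_t cookie)
	{
		struct my_dev *dev = data;

		/* slow, independent setup work goes here; the cookie identifies
		 * this call for later synchronization */
		(void)dev;
	}

	static int __init my_driver_init(void)
	{
		/*
		 * Booted without "fastboot": async_enabled is 0, so this call runs
		 * my_dev_probe_async() synchronously before returning.
		 * Booted with "fastboot": the call is queued and an async worker
		 * thread (started by async_init()) executes it later.
		 */
		async_schedule(my_dev_probe_async, &example_dev);

		/* block until every scheduled async call has completed */
		async_synchronize_full();
		return 0;
	}
	device_initcall(my_driver_init);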