Commit c1ce5fb7 authored by Yu Kuai, committed by Jialin Zhang

md: refactor idle/frozen_sync_thread()

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6OMCC
CVE: NA

--------------------------------

Our tests found the following deadlock in raid10:

1) Issue a normal write, and the write fails:

  raid10_end_write_request
   set_bit(R10BIO_WriteError, &r10_bio->state)
   one_write_done
    reschedule_retry

  // later from md thread
  raid10d
   handle_write_completed
    list_add(&r10_bio->retry_list, &conf->bio_end_io_list)

  // later from md thread
  raid10d
   if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
    list_move(conf->bio_end_io_list.prev, &tmp)
    r10_bio = list_first_entry(&tmp, struct r10bio, retry_list)
    raid_end_bio_io(r10_bio)

Dependency chain 1: normal IO is waiting for the superblock update
(raid10d only completes the failed write once MD_SB_CHANGE_PENDING is
clear).

2) Trigger a recovery:

  raid10_sync_request
   raise_barrier

Dependency chain 2: the sync thread is waiting for normal IO
(raise_barrier() must wait for in-flight normal IO to complete).

3) echo idle/frozen to sync_action:

  action_store
   mddev_lock
    md_unregister_thread
     kthread_stop

Dependency chain 3: dropping 'reconfig_mutex' is waiting for the sync
thread to exit (kthread_stop() is called while the mutex is held).

4) md thread can't update superblock:

  raid10d
   md_check_recovery
    if (mddev_trylock(mddev))
     md_update_sb

Dependency chain 4: the superblock update is waiting for
'reconfig_mutex' (mddev_trylock() fails while action_store() holds it).

Hence a cyclic dependency exists: normal IO -> superblock update ->
'reconfig_mutex' -> sync thread -> normal IO. In order to fix the
problem, one of the links must be broken. Dependencies 1 and 2 can't be
broken because they are fundamental to the design. Breaking dependency 4
might be possible if it could be guaranteed that no IO is in flight, but
that would require a new mechanism and seems complex. Dependency 3 is a
good choice: 'idle' and 'frozen' only require the sync thread to finish,
which can already be done asynchronously, so 'reconfig_mutex' does not
need to be held while waiting.
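
Concretely, the new stop_sync_thread() only asks the sync thread to stop
and then drops the lock, instead of reaping the thread synchronously. A
simplified view of the new code from the diff below, with explanatory
comments added here:

  static void stop_sync_thread(struct mddev *mddev)
  {
  	...
  	/* Ask the sync thread to stop; do not wait for it here. */
  	set_bit(MD_RECOVERY_INTR, &mddev->recovery);

  	/*
  	 * The thread might be blocked waiting for a metadata update
  	 * that will now never happen, so kick it awake.
  	 */
  	if (mddev->sync_thread)
  		wake_up_process(mddev->sync_thread->tsk);

  	/*
  	 * Drop 'reconfig_mutex' without waiting for the thread to
  	 * exit, so md_check_recovery() can take it and update the
  	 * superblock, breaking dependency 3.
  	 */
  	mddev_unlock(mddev);
  }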

This patch switches 'idle' and 'frozen' to wait for the sync thread to
finish asynchronously, and it also adds a sequence counter that records
how many times the sync thread has completed, so that 'idle' won't keep
waiting on a newly started sync thread.
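
The resulting wait in idle_sync_thread(), simplified from the hunks
below with comments added here: md_reap_sync_thread() increments
'sync_seq' every time a sync thread is reaped, which lets 'idle'
distinguish "the thread I interrupted has finished" from "a new sync
thread is running":

  static void idle_sync_thread(struct mddev *mddev)
  {
  	/* Snapshot before interrupting the running sync thread. */
  	int sync_seq = atomic_read(&mddev->sync_seq);

  	mutex_lock(&mddev->sync_mutex);
  	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  	stop_sync_thread(mddev);

  	/*
  	 * Wake up once the interrupted thread has been reaped
  	 * ('sync_seq' changed) or nothing is running; a newly
  	 * started sync thread is deliberately not waited for.
  	 */
  	wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
  		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
  	mutex_unlock(&mddev->sync_mutex);
  }

'frozen' needs no counter: it sets MD_RECOVERY_FROZEN under
'sync_mutex', so no new sync thread can be started, and it simply waits
for mddev->sync_thread to become NULL.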
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
Parent 6677cbd9
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -697,6 +697,7 @@ void mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
+	atomic_set(&mddev->sync_seq, 0);
 	spin_lock_init(&mddev->lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
@@ -4876,19 +4877,28 @@ static void stop_sync_thread(struct mddev *mddev)
 	if (work_pending(&mddev->del_work))
 		flush_workqueue(md_misc_wq);
-	if (mddev->sync_thread) {
-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		md_reap_sync_thread(mddev);
-	}
+	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+	/*
+	 * Thread might be blocked waiting for metadata update which will now
+	 * never happen.
+	 */
+	if (mddev->sync_thread)
+		wake_up_process(mddev->sync_thread->tsk);
 	mddev_unlock(mddev);
 }
 
 static void idle_sync_thread(struct mddev *mddev)
 {
+	int sync_seq = atomic_read(&mddev->sync_seq);
+
 	mutex_lock(&mddev->sync_mutex);
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	stop_sync_thread(mddev);
+
+	wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
+		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+
 	mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -4897,6 +4907,10 @@ static void frozen_sync_thread(struct mddev *mddev)
 	mutex_lock(&mddev->sync_mutex);
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	stop_sync_thread(mddev);
+
+	wait_event(resync_wait, mddev->sync_thread == NULL &&
+		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+
 	mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -9471,6 +9485,8 @@ void md_reap_sync_thread(struct mddev *mddev)
 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
+	atomic_inc(&mddev->sync_seq);
+
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
 	    mddev->degraded != mddev->raid_disks) {
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -531,6 +531,8 @@ struct mddev {
 	/* Used to synchronize idle and frozen for action_store() */
 	struct mutex			sync_mutex;
+	/* The sequence number for sync thread */
+	atomic_t sync_seq;
 };
 
 enum recovery_flags {