Commit 87c0fded authored by Ilya Dryomov

rbd: don't wait for the lock forever if blacklisted

-EBLACKLISTED from __rbd_register_watch() means that our ceph_client
got blacklisted - we won't be able to restore the watch and reacquire
the lock.  Wake up and fail all outstanding requests waiting for the
lock and arrange for all new requests that require the lock to fail
immediately.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Tested-by: Mike Christie <mchristi@redhat.com>
Parent 1001354c
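In short: when watch re-registration fails with -EBLACKLISTED, the patch sets the new RBD_DEV_FLAG_BLACKLISTED bit, wakes everyone sleeping on the lock waitqueue, and makes both the waiters and any new lock-requiring request bail out with -EBLACKLISTED instead of sleeping forever. The snippet below is a minimal userspace sketch of that wake-and-fail-fast pattern, not the rbd.c code itself; the pthread primitives, the -ESHUTDOWN return value, and names such as fake_dev and dev_wait_locked_or_fatal are illustrative stand-ins.

/*
 * Minimal userspace sketch of the wake-and-fail-fast pattern introduced by
 * this commit.  All names (fake_dev, dev_wait_locked_or_fatal, ...) are
 * illustrative stand-ins, not code from the rbd driver.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
        pthread_mutex_t lock;
        pthread_cond_t waitq;   /* stands in for rbd_dev->lock_waitq */
        bool lock_held;         /* stands in for RBD_LOCK_STATE_LOCKED */
        bool blacklisted;       /* stands in for RBD_DEV_FLAG_BLACKLISTED */
};

/* Wait until the exclusive lock is (re)acquired or the client is blacklisted. */
static int dev_wait_locked_or_fatal(struct fake_dev *dev)
{
        int ret = 0;

        pthread_mutex_lock(&dev->lock);
        while (!dev->lock_held && !dev->blacklisted)
                pthread_cond_wait(&dev->waitq, &dev->lock);
        if (dev->blacklisted)
                ret = -ESHUTDOWN;       /* the kernel patch uses -EBLACKLISTED */
        pthread_mutex_unlock(&dev->lock);
        return ret;
}

/* Analogue of the -EBLACKLISTED branch added to rbd_reregister_watch(). */
static void dev_mark_blacklisted(struct fake_dev *dev)
{
        pthread_mutex_lock(&dev->lock);
        dev->blacklisted = true;        /* set_bit(RBD_DEV_FLAG_BLACKLISTED, ...) */
        pthread_mutex_unlock(&dev->lock);
        pthread_cond_broadcast(&dev->waitq);    /* wake_requests() analogue */
}

int main(void)
{
        struct fake_dev dev = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .waitq = PTHREAD_COND_INITIALIZER,
        };

        /* Simulate the client getting blacklisted while requests are waiting. */
        dev_mark_blacklisted(&dev);
        printf("wait result: %d\n", dev_wait_locked_or_fatal(&dev));
        return 0;
}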
@@ -415,15 +415,15 @@ struct rbd_device {
 };
 
 /*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ *   by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
  */
 enum rbd_dev_flags {
         RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
         RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
+        RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
 };
 
 static DEFINE_MUTEX(client_mutex);      /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
         struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
                                                   struct rbd_device, watch_dwork);
         bool was_lock_owner = false;
+        bool need_to_wake = false;
         int ret;
 
         dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
                 was_lock_owner = rbd_release_lock(rbd_dev);
 
         mutex_lock(&rbd_dev->watch_mutex);
-        if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
-                goto fail_unlock;
+        if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+                mutex_unlock(&rbd_dev->watch_mutex);
+                goto out;
+        }
 
         ret = __rbd_register_watch(rbd_dev);
         if (ret) {
                 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
-                if (ret != -EBLACKLISTED)
+                if (ret == -EBLACKLISTED) {
+                        set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+                        need_to_wake = true;
+                } else {
                         queue_delayed_work(rbd_dev->task_wq,
                                            &rbd_dev->watch_dwork,
                                            RBD_RETRY_DELAY);
-                goto fail_unlock;
+                }
+                mutex_unlock(&rbd_dev->watch_mutex);
+                goto out;
         }
 
+        need_to_wake = true;
         rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
         rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
         mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
                          ret);
         }
 
+out:
         up_write(&rbd_dev->lock_rwsem);
-        wake_requests(rbd_dev, true);
-        return;
-
-fail_unlock:
-        mutex_unlock(&rbd_dev->watch_mutex);
-        up_write(&rbd_dev->lock_rwsem);
+        if (need_to_wake)
+                wake_requests(rbd_dev, true);
 }
 
 /*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
                 up_read(&rbd_dev->lock_rwsem);
                 schedule();
                 down_read(&rbd_dev->lock_rwsem);
-        } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+        } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+                 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
         finish_wait(&rbd_dev->lock_waitq, &wait);
 }
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
 
         if (must_be_locked) {
                 down_read(&rbd_dev->lock_rwsem);
-                if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+                if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+                    !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
                         rbd_wait_state_locked(rbd_dev);
+
+                WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+                        !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+                if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+                        result = -EBLACKLISTED;
+                        goto err_unlock;
+                }
         }
 
         img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
......
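A note on the WARN_ON() added to rbd_queue_workfn(): once rbd_wait_state_locked() returns, either the exclusive lock was acquired or the device was marked blacklisted, but never both and never neither, so "locked XOR !blacklisted" must always be false. The tiny standalone check below only restates that invariant; the function name is hypothetical and plain assert() replaces the kernel's WARN_ON().

/*
 * Standalone restatement of the invariant checked by the new WARN_ON():
 * "lock held" and "blacklisted" must not both be true and must not both
 * be false, i.e. locked == !blacklisted.
 */
#include <assert.h>
#include <stdbool.h>

static void check_lock_vs_blacklist(bool locked, bool blacklisted)
{
        /* Same shape as: WARN_ON(locked ^ !blacklisted); */
        assert(!(locked ^ !blacklisted));
}

int main(void)
{
        check_lock_vs_blacklist(true, false);   /* lock reacquired: OK */
        check_lock_vs_blacklist(false, true);   /* gave up because blacklisted: OK */
        return 0;
}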