提交 57bcb6cf 编写于 作者: P Philipp Reisner

drbd: Do not call generic_make_request() while holding req_lock

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
上级 d60de03a
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
#include "drbd_req.h" #include "drbd_req.h"
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
/* Update disk stats at start of I/O request */ /* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio) static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{ {
...@@ -558,20 +560,21 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -558,20 +560,21 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* assert something? */ /* assert something? */
if (req->rq_state & RQ_NET_PENDING) if (req->rq_state & RQ_NET_PENDING)
dec_ap_pending(mdev); dec_ap_pending(mdev);
p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE; req->rq_state |= RQ_NET_DONE;
if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE) if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
atomic_sub(req->i.size >> 9, &mdev->ap_in_flight); atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
if (!(req->rq_state & RQ_WRITE) &&
mdev->state.disk == D_UP_TO_DATE &&
!IS_ERR_OR_NULL(req->private_bio))
goto goto_read_retry_local;
/* if it is still queued, we may not complete it here. /* if it is still queued, we may not complete it here.
* it will be canceled soon. */ * it will be canceled soon. */
if (!(req->rq_state & RQ_NET_QUEUED)) if (!(req->rq_state & RQ_NET_QUEUED)) {
if (p)
goto goto_read_retry_local;
_req_may_be_done(req, m); /* Allowed while state.susp */ _req_may_be_done(req, m); /* Allowed while state.susp */
}
break; break;
case WRITE_ACKED_BY_PEER_AND_SIS: case WRITE_ACKED_BY_PEER_AND_SIS:
...@@ -631,9 +634,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -631,9 +634,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_NET_DONE; req->rq_state |= RQ_NET_DONE;
if (!(req->rq_state & RQ_WRITE) && if (!(req->rq_state & RQ_WRITE))
mdev->state.disk == D_UP_TO_DATE &&
!IS_ERR_OR_NULL(req->private_bio))
goto goto_read_retry_local; goto goto_read_retry_local;
_req_may_be_done_not_susp(req, m); _req_may_be_done_not_susp(req, m);
...@@ -641,9 +642,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -641,9 +642,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break; break;
goto_read_retry_local: goto_read_retry_local:
if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
_req_may_be_done_not_susp(req, m);
break;
}
D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
req->rq_state |= RQ_LOCAL_PENDING; req->rq_state |= RQ_LOCAL_PENDING;
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
generic_make_request(req->private_bio); get_ldev(mdev);
req->w.cb = w_restart_disk_io;
drbd_queue_work(&mdev->tconn->data.work, &req->w);
break; break;
case FAIL_FROZEN_DISK_IO: case FAIL_FROZEN_DISK_IO:
...@@ -706,11 +714,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -706,11 +714,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
dec_ap_pending(mdev); dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING; req->rq_state &= ~RQ_NET_PENDING;
req->rq_state |= (RQ_NET_OK|RQ_NET_DONE); req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
if (!IS_ERR_OR_NULL(req->private_bio)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(mdev);
}
_req_may_be_done_not_susp(req, m); _req_may_be_done_not_susp(req, m);
break; break;
}; };
...@@ -840,7 +843,8 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s ...@@ -840,7 +843,8 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
} else { } else {
/* READ || READA */ /* READ || READA */
if (local) { if (local) {
if (!drbd_may_do_local_read(mdev, sector, size)) { if (!drbd_may_do_local_read(mdev, sector, size) ||
remote_due_to_read_balancing(mdev, sector)) {
/* we could kick the syncer to /* we could kick the syncer to
* sync this extent asap, wait for * sync this extent asap, wait for
* it, then continue locally. * it, then continue locally.
...@@ -850,10 +854,6 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s ...@@ -850,10 +854,6 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
bio_put(req->private_bio); bio_put(req->private_bio);
req->private_bio = NULL; req->private_bio = NULL;
put_ldev(mdev); put_ldev(mdev);
} else if (remote_due_to_read_balancing(mdev, sector)) {
/* Keep the private bio in case we need it
for a local retry */
local = 0;
} }
} }
remote = !local && mdev->state.pdsk >= D_UP_TO_DATE; remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
...@@ -1081,7 +1081,7 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s ...@@ -1081,7 +1081,7 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
if (req->rq_state & RQ_IN_ACT_LOG) if (req->rq_state & RQ_IN_ACT_LOG)
drbd_al_complete_io(mdev, &req->i); drbd_al_complete_io(mdev, &req->i);
fail_and_free_req: fail_and_free_req:
if (!IS_ERR_OR_NULL(req->private_bio)) { if (local) {
bio_put(req->private_bio); bio_put(req->private_bio);
req->private_bio = NULL; req->private_bio = NULL;
put_ldev(mdev); put_ldev(mdev);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册