diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index bcba2742cfba3785d994abd1ead7dbe9b8d55378..c804e44b9455bf908c0900404d3e362f3af6b4f0 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1102,6 +1102,7 @@ struct drbd_conf {
 	struct fifo_buffer rs_plan_s; /* correction values of resync planer */
 	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
 	int rs_planed;    /* resync sectors already planed */
+	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
 };
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9d9c2ed31e9afcb6af2b1136dd84c0e626936a8b..e81d009dd0612d37c06b507265e33f8cc0d07db0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2799,6 +2799,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	atomic_set(&mdev->pp_in_use_by_net, 0);
 	atomic_set(&mdev->rs_sect_in, 0);
 	atomic_set(&mdev->rs_sect_ev, 0);
+	atomic_set(&mdev->ap_in_flight, 0);
 
 	mutex_init(&mdev->md_io_mutex);
 	mutex_init(&mdev->data.mutex);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 08f53ce9b88f07687bd5cf3c4557f7eedc19ae3e..5c60d77d447ceb0266db94feba948feb6f84c118 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -558,6 +558,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 	case handed_over_to_network:
 		/* assert something? */
+		if (bio_data_dir(req->master_bio) == WRITE)
+			atomic_add(req->size>>9, &mdev->ap_in_flight);
+
 		if (bio_data_dir(req->master_bio) == WRITE &&
 		    mdev->net_conf->wire_protocol == DRBD_PROT_A) {
 			/* this is what is dangerous about protocol A:
@@ -591,6 +594,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		dec_ap_pending(mdev);
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_DONE;
+		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
+			atomic_sub(req->size>>9, &mdev->ap_in_flight);
+
 		/* if it is still queued, we may not complete it here.
 		 * it will be canceled soon. */
 		if (!(req->rq_state & RQ_NET_QUEUED))
@@ -628,14 +634,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_NET_OK;
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		dec_ap_pending(mdev);
+		atomic_sub(req->size>>9, &mdev->ap_in_flight);
 		req->rq_state &= ~RQ_NET_PENDING;
 		_req_may_be_done_not_susp(req, m);
 		break;
 
 	case neg_acked:
 		/* assert something? */
-		if (req->rq_state & RQ_NET_PENDING)
+		if (req->rq_state & RQ_NET_PENDING) {
 			dec_ap_pending(mdev);
+			atomic_sub(req->size>>9, &mdev->ap_in_flight);
+		}
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_DONE;
 
@@ -692,6 +701,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		}
 		D_ASSERT(req->rq_state & RQ_NET_SENT);
 		req->rq_state |= RQ_NET_DONE;
+		if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+			atomic_sub(req->size>>9, &mdev->ap_in_flight);
 		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index ab2bd09d54b4bfc9fbf7bf74c2e46dbbeb389db5..69d350fe7c1e6ef1e42d4426ddc1c298d16428e5 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -338,19 +338,21 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
 	return rv;
 }
 
-/* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self!
- * Which means: don't use from bio endio callback. */
+/* completion of master bio is outside of our spinlock.
+ * We still may or may not be inside some irqs disabled section
+ * of the lower level driver completion callback, so we need to
+ * spin_lock_irqsave here. */
 static inline int req_mod(struct drbd_request *req,
 		enum drbd_req_event what)
 {
+	unsigned long flags;
 	struct drbd_conf *mdev = req->mdev;
 	struct bio_and_error m;
 	int rv;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	rv = __req_mod(req, what, &m);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
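The accounting pattern this patch introduces is simple: atomic_add the request's sector count (req->size>>9, bytes to 512-byte sectors) when a write is handed to the network, and atomic_sub it again on every path that retires the request (positive ack, negative ack, connection loss, protocol A completion). For illustration only, here is a minimal user-space sketch of that pattern using C11 atomics in place of the kernel's atomic_t API; the on_send()/on_ack() hooks and the byte sizes are hypothetical stand-ins for the __req_mod() events above, not DRBD code.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for mdev->ap_in_flight: application sectors handed
 * to the network but not yet acknowledged by the peer. */
static atomic_int ap_in_flight;

/* A write request of `size` bytes goes on the wire
 * (cf. handed_over_to_network). */
static void on_send(unsigned int size)
{
	atomic_fetch_add(&ap_in_flight, size >> 9);
}

/* The request is retired: acked, neg-acked, or the connection
 * was lost (cf. the atomic_sub calls in the patch). */
static void on_ack(unsigned int size)
{
	atomic_fetch_sub(&ap_in_flight, size >> 9);
}

int main(void)
{
	on_send(4096);	/* 8 sectors in flight */
	on_send(8192);	/* 24 sectors in flight */
	on_ack(4096);	/* 16 sectors in flight */
	printf("in flight: %d sectors\n", atomic_load(&ap_in_flight));
	return 0;
}

Because every retirement path decrements by the same amount the send path added, the counter converges back to zero once all writes are acknowledged, regardless of the order in which acks arrive.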