Commit 181286ad authored by Lars Ellenberg, committed by Philipp Reisner

drbd: preparation commit, pass drbd_interval to drbd_al_begin/complete_io

We want to avoid bio_split for bios crossing activity log boundaries.
So we may need to activate two activity log extents "atomically".
drbd_al_begin_io() needs to know more than just the start sector.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Parent 85f103d8
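The commit message above explains why the callee needs more than a start sector: only with the whole interval can it tell whether a request touches one activity log extent or two. As a rough illustration (not part of this commit; the helper name is hypothetical, and it assumes, as elsewhere in this diff, that drbd_interval carries the start sector in 512-byte units and the size in bytes):

/* Hypothetical helper, for illustration only: which activity log
 * extents does the interval [sector, sector + size) touch? */
static void al_extents_of_interval(const struct drbd_interval *i,
				   unsigned int *first, unsigned int *last)
{
	/* AL_EXTENT_SHIFT - 9 converts 512-byte sectors to AL extent numbers */
	*first = i->sector >> (AL_EXTENT_SHIFT - 9);
	*last = i->size == 0 ? *first
		: (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT - 9);
	/* *first != *last means two extents would have to be activated
	 * "atomically", which a bare sector_t argument cannot express. */
}

With only the old sector_t argument, drbd_al_begin_io() could compute the first extent but never the last one, which is why the signatures below change to take the interval.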
@@ -205,9 +205,9 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
 	return al_ext;
 }
 
-void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
 {
-	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+	unsigned int enr = (i->sector >> (AL_EXTENT_SHIFT-9));
 	struct lc_element *al_ext;
 	struct update_al_work al_work;
 
@@ -254,9 +254,9 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
 	}
 }
 
-void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
 {
-	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+	unsigned int enr = (i->sector >> (AL_EXTENT_SHIFT-9));
 	struct lc_element *extent;
 	unsigned long flags;
 
...
@@ -1584,8 +1584,8 @@ extern const char *drbd_conn_str(enum drbd_conns s);
 extern const char *drbd_role_str(enum drbd_role s);
 
 /* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
 extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
...
@@ -2061,7 +2061,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
 		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
 		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
-		drbd_al_begin_io(mdev, peer_req->i.sector);
+		drbd_al_begin_io(mdev, &peer_req->i);
 	}
 
 	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
@@ -2075,7 +2075,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 	drbd_remove_epoch_entry_interval(mdev, peer_req);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
-		drbd_al_complete_io(mdev, peer_req->i.sector);
+		drbd_al_complete_io(mdev, &peer_req->i);
 
 out_interrupted:
 	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
...
@@ -128,12 +128,12 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
 	if (s & RQ_LOCAL_MASK) {
 		if (get_ldev_if_state(mdev, D_FAILED)) {
 			if (s & RQ_IN_ACT_LOG)
-				drbd_al_complete_io(mdev, req->i.sector);
+				drbd_al_complete_io(mdev, &req->i);
 			put_ldev(mdev);
 		} else if (__ratelimit(&drbd_ratelimit_state)) {
-			dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
+			dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
 				 "but my Disk seems to have failed :(\n",
-				 (unsigned long long) req->i.sector);
+				 (unsigned long long) req->i.sector, req->i.size);
 		}
 	}
 }
@@ -782,7 +782,7 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
 	 * of transactional on-disk meta data updates. */
 	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
 		req->rq_state |= RQ_IN_ACT_LOG;
-		drbd_al_begin_io(mdev, sector);
+		drbd_al_begin_io(mdev, &req->i);
 	}
 
 	remote = remote && drbd_should_do_remote(mdev->state);
@@ -979,7 +979,7 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
 
 fail_free_complete:
 	if (req->rq_state & RQ_IN_ACT_LOG)
-		drbd_al_complete_io(mdev, sector);
+		drbd_al_complete_io(mdev, &req->i);
 fail_and_free_req:
 	if (local) {
 		bio_put(req->private_bio);
...
@@ -101,7 +101,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 {
 	unsigned long flags = 0;
 	struct drbd_conf *mdev = peer_req->w.mdev;
-	sector_t e_sector;
+	struct drbd_interval i;
 	int do_wake;
 	u64 block_id;
 	int do_al_complete_io;
@@ -110,7 +110,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 	 * we may no longer access it,
 	 * it may be freed/reused already!
 	 * (as soon as we release the req_lock) */
-	e_sector = peer_req->i.sector;
+	i = peer_req->i;
 	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
 	block_id = peer_req->block_id;
 
@@ -134,13 +134,13 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	if (block_id == ID_SYNCER)
-		drbd_rs_complete_io(mdev, e_sector);
+		drbd_rs_complete_io(mdev, i.sector);
 
 	if (do_wake)
 		wake_up(&mdev->ee_wait);
 
 	if (do_al_complete_io)
-		drbd_al_complete_io(mdev, e_sector);
+		drbd_al_complete_io(mdev, &i);
 
 	wake_asender(mdev->tconn);
 	put_ldev(mdev);
@@ -1301,7 +1301,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
 	struct drbd_conf *mdev = w->mdev;
 
 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
-		drbd_al_begin_io(mdev, req->i.sector);
+		drbd_al_begin_io(mdev, &req->i);
 	/* Calling drbd_al_begin_io() out of the worker might deadlocks
 	   theoretically. Practically it can not deadlock, since this is
 	   only used when unfreezing IOs. All the extents of the requests
...