Commit db830c46 authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Local variable renames: e -> peer_req

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Parent 6c852bec
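The change below is a mechanical rename: every local variable and parameter of type struct drbd_peer_request that was called "e" becomes "peer_req"; no behaviour changes. As a rough illustration of why such a rename pays off, here is a hypothetical, self-contained C sketch (not DRBD code; the struct and function names are made up) comparing how the two names read at a use site:

/* Hypothetical illustration, not DRBD code: a descriptive name documents
 * itself at every use and is easy to grep for, which is the point of the
 * e -> peer_req rename below. */
#include <stdio.h>

struct peer_request {
	unsigned long long sector;	/* start sector of the request */
	unsigned int size;		/* payload size in bytes */
};

/* before the rename: "e" says nothing about what is being completed */
static void complete_old(struct peer_request *e)
{
	printf("completed %u bytes at sector %llu\n", e->size, e->sector);
}

/* after the rename: the parameter name carries its meaning */
static void complete_new(struct peer_request *peer_req)
{
	printf("completed %u bytes at sector %llu\n",
	       peer_req->size, peer_req->sector);
}

int main(void)
{
	struct peer_request req = { .sector = 2048, .size = 4096 };

	complete_old(&req);
	complete_new(&req);
	return 0;
}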
@@ -1701,9 +1701,9 @@ static inline int drbd_bio_has_active_page(struct bio *bio)
 	return 0;
 }
 
-static inline int drbd_ee_has_active_page(struct drbd_peer_request *e)
+static inline int drbd_ee_has_active_page(struct drbd_peer_request *peer_req)
 {
-	struct page *page = e->pages;
+	struct page *page = peer_req->pages;
 	page_chain_for_each(page) {
 		if (page_count(page) > 1)
 			return 1;
...
@@ -2429,17 +2429,17 @@ int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
 /**
  * drbd_send_ack() - Sends an ack packet
- * @mdev:	DRBD device.
- * @cmd:	Packet command code.
- * @e:		Epoch entry.
+ * @mdev:	DRBD device
+ * @cmd:	packet command code
+ * @peer_req:	peer request
  */
 int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
-		  struct drbd_peer_request *e)
+		  struct drbd_peer_request *peer_req)
 {
 	return _drbd_send_ack(mdev, cmd,
-			      cpu_to_be64(e->i.sector),
-			      cpu_to_be32(e->i.size),
-			      e->block_id);
+			      cpu_to_be64(peer_req->i.sector),
+			      cpu_to_be32(peer_req->i.size),
+			      peer_req->block_id);
 }
 
 /* This function misuses the block_id field to signal if the blocks
@@ -2641,10 +2641,12 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 	return 1;
 }
 
-static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_peer_request *e)
+static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+			    struct drbd_peer_request *peer_req)
 {
-	struct page *page = e->pages;
-	unsigned len = e->i.size;
+	struct page *page = peer_req->pages;
+	unsigned len = peer_req->i.size;
+
 	/* hint all but last page with MSG_MORE */
 	page_chain_for_each(page) {
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
@@ -2747,7 +2749,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
  * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
  */
 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
-		    struct drbd_peer_request *e)
+		    struct drbd_peer_request *peer_req)
 {
 	int ok;
 	struct p_data p;
@@ -2757,9 +2759,11 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
 	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
 		crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
 
-	prepare_header(mdev, &p.head, cmd, sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
-	p.sector   = cpu_to_be64(e->i.sector);
-	p.block_id = e->block_id;
+	prepare_header(mdev, &p.head, cmd, sizeof(p) -
+		       sizeof(struct p_header80) +
+		       dgs + peer_req->i.size);
+	p.sector   = cpu_to_be64(peer_req->i.sector);
+	p.block_id = peer_req->block_id;
 	p.seq_num  = 0;  /* unused */
 
 	/* Only called by our kernel thread.
@@ -2772,11 +2776,11 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
 	ok = sizeof(p) == drbd_send(mdev, mdev->tconn->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
 	if (ok && dgs) {
 		dgb = mdev->tconn->int_dig_out;
-		drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, e, dgb);
+		drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, dgb);
 		ok = dgs == drbd_send(mdev, mdev->tconn->data.socket, dgb, dgs, 0);
 	}
 	if (ok)
-		ok = _drbd_send_zc_ee(mdev, e);
+		ok = _drbd_send_zc_ee(mdev, peer_req);
 
 	drbd_put_data_sock(mdev);
...
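_drbd_send_zc_ee above walks the request's page chain and, per its comment, hints all but the last page with MSG_MORE so the network stack can coalesce the pieces. A hypothetical userspace sketch of the same idea follows (plain send() on a byte buffer; not DRBD code, and send_in_chunks is a made-up helper):

/* Hypothetical sketch of "hint all but the last chunk with MSG_MORE"; not DRBD code. */
#include <sys/types.h>
#include <sys/socket.h>

static int send_in_chunks(int sock, const char *buf, size_t len, size_t chunk)
{
	while (len) {
		size_t l = len < chunk ? len : chunk;
		/* more data follows unless this is the final chunk */
		int flags = (l == len) ? 0 : MSG_MORE;

		if (send(sock, buf, l, flags) != (ssize_t)l)
			return -1;	/* short send or error: give up (simplification) */
		buf += l;
		len -= l;
	}
	return 0;
}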
@@ -2445,7 +2445,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
 void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 		   const char *seen_hash, const char *calc_hash,
-		   const struct drbd_peer_request *e)
+		   const struct drbd_peer_request *peer_req)
 {
 	struct cn_msg *cn_reply;
 	struct drbd_nl_cfg_reply *reply;
@@ -2453,7 +2453,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 	struct page *page;
 	unsigned len;
 
-	if (!e)
+	if (!peer_req)
 		return;
 	if (!reason || !reason[0])
 		return;
@@ -2472,8 +2472,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 			    GFP_NOIO);
 	if (!cn_reply) {
-		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
-			(unsigned long long)e->i.sector, e->i.size);
+		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, "
+			"sector %llu, size %u\n",
+			(unsigned long long)peer_req->i.sector,
+			peer_req->i.size);
 		return;
 	}
@@ -2483,15 +2485,15 @@ void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
 	tl = tl_add_str(tl, T_dump_ee_reason, reason);
 	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
 	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
-	tl = tl_add_int(tl, T_ee_sector, &e->i.sector);
-	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
+	tl = tl_add_int(tl, T_ee_sector, &peer_req->i.sector);
+	tl = tl_add_int(tl, T_ee_block_id, &peer_req->block_id);
 
 	/* dump the first 32k */
-	len = min_t(unsigned, e->i.size, 32 << 10);
+	len = min_t(unsigned, peer_req->i.size, 32 << 10);
 	put_unaligned(T_ee_data, tl++);
 	put_unaligned(len, tl++);
 
-	page = e->pages;
+	page = peer_req->pages;
 	page_chain_for_each(page) {
 		void *d = kmap_atomic(page, KM_USER0);
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
...
(This file's diff has been collapsed and is not shown here.)
@@ -80,47 +80,47 @@ void drbd_md_io_complete(struct bio *bio, int error)
 /* reads on behalf of the partner,
  * "submitted" by the receiver
  */
-void drbd_endio_read_sec_final(struct drbd_peer_request *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_conf *mdev = peer_req->mdev;
 
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-	mdev->read_cnt += e->i.size >> 9;
-	list_del(&e->w.list);
+	mdev->read_cnt += peer_req->i.size >> 9;
+	list_del(&peer_req->w.list);
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
-	if (test_bit(__EE_WAS_ERROR, &e->flags))
+	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
-	drbd_queue_work(&mdev->tconn->data.work, &e->w);
+	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
 	put_ldev(mdev);
 }
 
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_conf *mdev = peer_req->mdev;
 	sector_t e_sector;
 	int do_wake;
 	u64 block_id;
 	int do_al_complete_io;
 
-	/* after we moved e to done_ee,
+	/* after we moved peer_req to done_ee,
 	 * we may no longer access it,
 	 * it may be freed/reused already!
 	 * (as soon as we release the req_lock) */
-	e_sector = e->i.sector;
-	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
-	block_id = e->block_id;
+	e_sector = peer_req->i.sector;
+	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
+	block_id = peer_req->block_id;
 
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-	mdev->writ_cnt += e->i.size >> 9;
-	list_del(&e->w.list); /* has been on active_ee or sync_ee */
-	list_add_tail(&e->w.list, &mdev->done_ee);
+	mdev->writ_cnt += peer_req->i.size >> 9;
+	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
+	list_add_tail(&peer_req->w.list, &mdev->done_ee);
 
 	/*
 	 * Do not remove from the write_requests tree here: we did not send the
@@ -132,7 +132,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(l
 	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
 
-	if (test_bit(__EE_WAS_ERROR, &e->flags))
+	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
@@ -154,20 +154,20 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(l
  */
 void drbd_endio_sec(struct bio *bio, int error)
 {
-	struct drbd_peer_request *e = bio->bi_private;
-	struct drbd_conf *mdev = e->mdev;
+	struct drbd_peer_request *peer_req = bio->bi_private;
+	struct drbd_conf *mdev = peer_req->mdev;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
 
 	if (error && __ratelimit(&drbd_ratelimit_state))
 		dev_warn(DEV, "%s: error=%d s=%llus\n",
 				is_write ? "write" : "read", error,
-				(unsigned long long)e->i.sector);
+				(unsigned long long)peer_req->i.sector);
 	if (!error && !uptodate) {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
 					is_write ? "write" : "read",
-					(unsigned long long)e->i.sector);
+					(unsigned long long)peer_req->i.sector);
 		/* strange behavior of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
 		 * but do not return any error?! */
@@ -175,14 +175,14 @@ void drbd_endio_sec(struct bio *bio, int error)
 	}
 
 	if (error)
-		set_bit(__EE_WAS_ERROR, &e->flags);
+		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
 	bio_put(bio); /* no need for the bio anymore */
-	if (atomic_dec_and_test(&e->pending_bios)) {
+	if (atomic_dec_and_test(&peer_req->pending_bios)) {
 		if (is_write)
-			drbd_endio_write_sec_final(e);
+			drbd_endio_write_sec_final(peer_req);
 		else
-			drbd_endio_read_sec_final(e);
+			drbd_endio_read_sec_final(peer_req);
 	}
 }
@@ -248,11 +248,11 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 }
 
 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
-		  struct drbd_peer_request *e, void *digest)
+		  struct drbd_peer_request *peer_req, void *digest)
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct page *page = e->pages;
+	struct page *page = peer_req->pages;
 	struct page *tmp;
 	unsigned len;
@@ -269,7 +269,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
 		page = tmp;
 	}
 	/* and now the last, possibly only partially used page */
-	len = e->i.size & (PAGE_SIZE - 1);
+	len = peer_req->i.size & (PAGE_SIZE - 1);
 	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
 	crypto_hash_update(&desc, &sg, sg.length);
 	crypto_hash_final(&desc, digest);
@@ -298,7 +298,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 /* TODO merge common code with w_e_end_ov_req */
 int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req =
+		container_of(w, struct drbd_peer_request, w);
 	int digest_size;
 	void *digest;
 	int ok = 1;
@@ -306,22 +307,22 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	if (likely((e->flags & EE_WAS_ERROR) != 0))
+	if (likely((peer_req->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
 	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
-		sector_t sector = e->i.sector;
-		unsigned int size = e->i.size;
-		drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+		sector_t sector = peer_req->i.sector;
+		unsigned int size = peer_req->i.size;
+		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
 		/* Free e and pages before send.
 		 * In case we block on congestion, we could otherwise run into
 		 * some distributed deadlock, if the other side blocks on
 		 * congestion as well, because our receiver blocks in
 		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-		drbd_free_ee(mdev, e);
-		e = NULL;
+		drbd_free_ee(mdev, peer_req);
+		peer_req = NULL;
 		inc_rs_pending(mdev);
 		ok = drbd_send_drequest_csum(mdev, sector, size,
 					     digest, digest_size,
@@ -333,8 +334,8 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	}
 
 out:
-	if (e)
-		drbd_free_ee(mdev, e);
+	if (peer_req)
+		drbd_free_ee(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -345,7 +346,7 @@ int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 
 	if (!get_ldev(mdev))
 		return -EIO;
@@ -355,17 +356,17 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	/* GFP_TRY, because if there is no memory available right now, this may
 	 * be rescheduled for later. It is "only" background resync, after all. */
-	e = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
-	if (!e)
+	peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
+	if (!peer_req)
 		goto defer;
 
-	e->w.cb = w_e_send_csum;
+	peer_req->w.cb = w_e_send_csum;
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add(&e->w.list, &mdev->read_ee);
+	list_add(&peer_req->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(size >> 9, &mdev->rs_sect_ev);
-	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+	if (drbd_submit_ee(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
 		return 0;
 
 	/* If it failed because of ENOMEM, retry should help.  If it failed
@@ -373,10 +374,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 defer:
 	put_ldev(mdev);
 	return -EAGAIN;
@@ -901,19 +902,19 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 }
 
 /* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
 {
-	if (drbd_ee_has_active_page(e)) {
+	if (drbd_ee_has_active_page(peer_req)) {
 		/* This might happen if sendpage() has not finished */
-		int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
+		int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
 		atomic_add(i, &mdev->pp_in_use_by_net);
 		atomic_sub(i, &mdev->pp_in_use);
 		spin_lock_irq(&mdev->tconn->req_lock);
-		list_add_tail(&e->w.list, &mdev->net_ee);
+		list_add_tail(&peer_req->w.list, &mdev->net_ee);
 		spin_unlock_irq(&mdev->tconn->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 }
 
 /**
@@ -924,28 +925,28 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_requ
  */
 int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	int ok;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
-		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+		ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
-			    (unsigned long long)e->i.sector);
+			    (unsigned long long)peer_req->i.sector);
 
-		ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
+		ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
 	}
 
 	dec_unacked(mdev);
-	move_to_net_ee_or_free(mdev, e);
+	move_to_net_ee_or_free(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_block() failed\n");
@@ -960,26 +961,26 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
  */
 int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	int ok;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
 
 	if (get_ldev_if_state(mdev, D_FAILED)) {
-		drbd_rs_complete_io(mdev, e->i.sector);
+		drbd_rs_complete_io(mdev, peer_req->i.sector);
 		put_ldev(mdev);
 	}
 
 	if (mdev->state.conn == C_AHEAD) {
-		ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
-	} else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+		ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
 			inc_rs_pending(mdev);
-			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
 		} else {
 			if (__ratelimit(&drbd_ratelimit_state))
 				dev_err(DEV, "Not sending RSDataReply, "
@@ -989,17 +990,17 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
-			    (unsigned long long)e->i.sector);
+			    (unsigned long long)peer_req->i.sector);
 
-		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
 
 		/* update resync data with failure */
-		drbd_rs_failed_io(mdev, e->i.sector, e->i.size);
+		drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
 	}
 
 	dec_unacked(mdev);
-	move_to_net_ee_or_free(mdev, e);
+	move_to_net_ee_or_free(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_block() failed\n");
@@ -1008,26 +1009,26 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
 int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	struct digest_info *di;
 	int digest_size;
 	void *digest = NULL;
 	int ok, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
 
 	if (get_ldev(mdev)) {
-		drbd_rs_complete_io(mdev, e->i.sector);
+		drbd_rs_complete_io(mdev, peer_req->i.sector);
 		put_ldev(mdev);
 	}
 
-	di = e->digest;
+	di = peer_req->digest;
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		/* quick hack to try to avoid a race against reconfiguration.
 		 * a real fix would be much more involved,
 		 * introducing more locking mechanisms */
@@ -1037,31 +1038,31 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 			digest = kmalloc(digest_size, GFP_NOIO);
 		}
 		if (digest) {
-			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+			drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
 
 		if (eq) {
-			drbd_set_in_sync(mdev, e->i.sector, e->i.size);
+			drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
 			/* rs_same_csums unit is BM_BLOCK_SIZE */
-			mdev->rs_same_csum += e->i.size >> BM_BLOCK_SHIFT;
-			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
+			mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
 		} else {
 			inc_rs_pending(mdev);
-			e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
-			e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
+			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
+			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
 			kfree(di);
-			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
 		}
 	} else {
-		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
 	}
 
 	dec_unacked(mdev);
-	move_to_net_ee_or_free(mdev, e);
+	move_to_net_ee_or_free(mdev, peer_req);
 
 	if (unlikely(!ok))
 		dev_err(DEV, "drbd_send_block/ack() failed\n");
@@ -1071,9 +1072,9 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 /* TODO merge common code with w_e_send_csum */
 int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
-	sector_t sector = e->i.sector;
-	unsigned int size = e->i.size;
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+	sector_t sector = peer_req->i.sector;
+	unsigned int size = peer_req->i.size;
 	int digest_size;
 	void *digest;
 	int ok = 1;
@@ -1088,8 +1089,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 		goto out;
 	}
 
-	if (likely(!(e->flags & EE_WAS_ERROR)))
-		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
+		drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
 	else
 		memset(digest, 0, digest_size);
@@ -1098,8 +1099,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, e);
-	e = NULL;
+	drbd_free_ee(mdev, peer_req);
+	peer_req = NULL;
 	inc_rs_pending(mdev);
 	ok = drbd_send_drequest_csum(mdev, sector, size,
 				     digest, digest_size,
@@ -1109,8 +1110,8 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	kfree(digest);
 
 out:
-	if (e)
-		drbd_free_ee(mdev, e);
+	if (peer_req)
+		drbd_free_ee(mdev, peer_req);
 	dec_unacked(mdev);
 	return ok;
 }
@@ -1128,16 +1129,16 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
 
 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
+	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
 	struct digest_info *di;
 	void *digest;
-	sector_t sector = e->i.sector;
-	unsigned int size = e->i.size;
+	sector_t sector = peer_req->i.sector;
+	unsigned int size = peer_req->i.size;
 	int digest_size;
 	int ok, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev, e);
+		drbd_free_ee(mdev, peer_req);
 		dec_unacked(mdev);
 		return 1;
 	}
@@ -1145,17 +1146,17 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
 	 * the resync lru has been cleaned up already */
 	if (get_ldev(mdev)) {
-		drbd_rs_complete_io(mdev, e->i.sector);
+		drbd_rs_complete_io(mdev, peer_req->i.sector);
 		put_ldev(mdev);
 	}
 
-	di = e->digest;
+	di = peer_req->digest;
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+			drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
 
 			D_ASSERT(digest_size == di->digest_size);
 			eq = !memcmp(digest, di->digest, digest_size);
@@ -1168,7 +1169,7 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * some distributed deadlock, if the other side blocks on
 	 * congestion as well, because our receiver blocks in
 	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 	if (!eq)
 		drbd_ov_oos_found(mdev, sector, size);
 	else
...
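The worker callbacks in the last file (w_e_send_csum, w_e_end_data_req, w_e_end_ov_reply, ...) all recover the peer request from its embedded struct drbd_work via container_of(w, struct drbd_peer_request, w). A minimal, self-contained sketch of that pattern follows (hypothetical types, not DRBD code):

/* Hypothetical sketch of the container_of pattern used by the worker
 * callbacks above; the types here are made up, not DRBD's. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
	void (*cb)(struct work *w);	/* callback run by the work queue */
};

struct peer_request {
	unsigned long long sector;
	struct work w;			/* embedded work item queued for later */
};

static void end_request(struct work *w)
{
	/* recover the enclosing request from the embedded member */
	struct peer_request *peer_req = container_of(w, struct peer_request, w);

	printf("finishing request at sector %llu\n", peer_req->sector);
}

int main(void)
{
	struct peer_request req = { .sector = 8, .w = { .cb = end_request } };

	req.w.cb(&req.w);	/* a real work queue would invoke the callback */
	return 0;
}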