/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_work *w, int cancel);



/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_request_endio (defined here)
 *   drbd_peer_request_endio (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */


/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync after dependencies, we grab a write lock, because
   we need stable states on all devices for that.  */
rwlock_t global_state_lock;

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;

	md_io = (struct drbd_md_io *)bio->bi_private;
	md_io->error = error;

	complete(&md_io->event);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->w.mdev;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->w.mdev;
	sector_t e_sector;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	e_sector = peer_req->i.sector;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->writ_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&peer_req->w.list, &mdev->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * It is removed from the tree in "drbd_process_done_ee" within the
	 * appropriate w.cb (e_end_block/e_end_resync_block) or from
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(mdev, e_sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, e_sector);

	wake_asender(mdev->tconn);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_peer_request_endio(struct bio *bio, int error)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_conf *mdev = peer_req->w.mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)peer_req->i.sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)peer_req->i.sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

/* read, read-ahead or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_request_endio(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->w.mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? WRITE_COMPLETED_WITH_ERROR
			: (bio_rw(bio) == READ)
			  ? READ_COMPLETED_WITH_ERROR
			  : READ_AHEAD_COMPLETED_WITH_ERROR;
	} else
		what = COMPLETED_OK;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}

int w_read_retry_remote(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;

	/* We should not detach for read io-error,
	 * but try to WRITE the P_DATA_REPLY to the failed location,
	 * to give the disk the chance to relocate that block */

	spin_lock_irq(&mdev->tconn->req_lock);
	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
		_req_mod(req, READ_RETRY_REMOTE_CANCELED);
		spin_unlock_irq(&mdev->tconn->req_lock);
		return 1;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	return w_send_read_req(w, 0);
}

void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
		  struct drbd_peer_request *peer_req, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = peer_req->i.size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}

void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	__bio_for_each_segment(bvec, bio, i, 0) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}

/* MAYBE merge common code with w_e_end_ov_req */
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
		/* Free peer_req and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
		drbd_free_ee(mdev, peer_req);
		peer_req = NULL;
		inc_rs_pending(mdev);
		ok = drbd_send_drequest_csum(mdev, sector, size,
					     digest, digest_size,
					     P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		ok = 0;
	}

out:
	if (peer_req)
		drbd_free_ee(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return ok;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

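/* Read a candidate block locally so that w_e_send_csum can hash it and
 * send the checksum to the peer (checksum based resync, protocol >= 89).
 * Returns 0 if the read was submitted, -EIO if the local disk is gone,
 * and -EAGAIN if the attempt should simply be retried later. */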
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_peer_request *peer_req;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, peer_req);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(w, cancel);
		break;
	}

	return 1;
}

void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	if (list_empty(&mdev->resync_work.list))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
}

static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

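/* Advance the plan-ahead ring buffer by one step: store @value at the
 * current head and return the value that had been planned for the step
 * that just elapsed. */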
static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}

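/* Core of the dynamic resync speed controller: based on the number of
 * sectors whose replies came in since the last call (rs_sect_in), plan
 * how many sectors to request during the next SLEEP_TIME cycle.  The
 * target is either a constant amount of data in flight (c_fill_target)
 * or whatever keeps the reply delay near c_delay_target; the required
 * correction is spread over the rs_plan_s FIFO ("plan ahead" steps),
 * and the result is capped by c_max_rate. */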
static int drbd_rs_controller(struct drbd_conf *mdev)
{
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in the proxy*/
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - mdev->rs_planed;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(&mdev->rs_plan_s, cps);
	mdev->rs_planed += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
	mdev->rs_planed -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}

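/* Translate the planned sector count for one SLEEP_TIME cycle into a
 * number of BM_BLOCK_SIZE sized requests, updating c_sync_rate on the
 * way; without a plan (c_plan_ahead == 0) fall back to the statically
 * configured sync rate. */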
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
	int number;
	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = mdev->sync_conf.rate;
		number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
	}

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to incoming reply rate soon enough anyways. */
	return number;
}

static int w_make_resync_request(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 1;

	if (mdev->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(mdev);
		return 1;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync, a
		   get_ldev_if_state(mdev, D_FAILED) would be sufficient, but
		   continuing a resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 1;
	}

	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
	number = drbd_rs_number_requests(mdev);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests when half of the send buffer is filled */
		mutex_lock(&mdev->tconn->data.mutex);
		if (mdev->tconn->data.socket) {
			queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->tconn->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			put_ldev(mdev);
			return 1;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we already have the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->tconn->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return 0;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			inc_rs_pending(mdev);
			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
					       sector, size, ID_SYNCER)) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return 0;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(mdev);
		return 1;
	}

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 1;
}

static int w_make_ov_request(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(mdev);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity) {
			return 1;
		}

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (!drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}

int w_ov_finished(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	kfree(w);
	ov_oos_print(mdev);
	drbd_resync_finished(mdev);

	return 1;
}

static int w_resync_finished(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	kfree(w);

	drbd_resync_finished(mdev);

	return 1;
}

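/* Request a ping from the peer and wait for the ack (or for the
 * connection to drop below C_CONNECTED); used to make sure the peer is
 * still reachable before we conclude the resync below. */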
static void ping_peer(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;

	clear_bit(GOT_PING_ACK, &tconn->flags);
	request_ping(tconn);
	wait_event(tconn->ping_wait,
		   test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_conf *mdev)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(mdev)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * is not finished by now).   Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			drbd_queue_work(&mdev->tconn->data.work, w);
			return 1;
		}
		dev_err(DEV, "Warning: failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;
	db = mdev->rs_total;
	dbdt = Bit2KB(db/dt);
	mdev->rs_paused /= HZ;

	if (!get_ldev(mdev))
		goto out;

	ping_peer(mdev);

	spin_lock_irq(&mdev->tconn->req_lock);
	os = mdev->state;

	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     verify_done ? "Online verify " : "Resync",
	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(mdev);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk blocks out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - mdev->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (mdev->csums_tfm && mdev->rs_total) {
			const unsigned long s = mdev->rs_same_csum;
			const unsigned long t = mdev->rs_total;
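			/* integer percentage of equal checksums; the two
			 * branches avoid overflowing s*100 on 32 bit */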
			const int ratio =
				(t == 0)     ? 0 :
			(t < 100000) ? ((s*100)/t) : (s/(t/100));
			dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total));
		}
	}

	if (mdev->rs_failed) {
		dev_info(DEV, "            %lu failed blocks\n", mdev->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (mdev->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
			}
		}

		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
			/* for verify runs, we don't update uuids here,
			 * so there would be nothing to report. */
			drbd_uuid_set_bm(mdev, 0UL);
			drbd_print_uuids(mdev, "updated UUIDs");
			if (mdev->p_uuid) {
				/* Now the two UUID sets are equal, update what we
				 * know of the peer. */
				int i;
				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
					mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
			}
		}
	}

	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&mdev->tconn->req_lock);
	put_ldev(mdev);
out:
	mdev->rs_total  = 0;
	mdev->rs_failed = 0;
	mdev->rs_paused = 0;
	if (verify_done)
		mdev->ov_start_sector = 0;

	drbd_md_sync(mdev);

	if (khelper_cmd)
		drbd_khelper(mdev, khelper_cmd);

	return 1;
}

/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
	if (drbd_ee_has_active_page(peer_req)) {
		/* This might happen if sendpage() has not finished */
		int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
		atomic_add(i, &mdev->pp_in_use_by_net);
		atomic_sub(i, &mdev->pp_in_use);
		spin_lock_irq(&mdev->tconn->req_lock);
		list_add_tail(&peer_req->w.list, &mdev->net_ee);
		spin_unlock_irq(&mdev->tconn->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_ee(mdev, peer_req);
}

/**
 * w_e_end_data_req() - Worker callback to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_data_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)peer_req->i.sector);

		ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}

/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	if (mdev->state.conn == C_AHEAD) {
		ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(mdev);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			ok = 1;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)peer_req->i.sector);

		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);

		/* update resync data with failure */
		drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}

int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (mdev->csums_tfm) {
			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
		} else {
			inc_rs_pending(mdev);
			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
			kfree(di);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return ok;
}

int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (!digest) {
		ok = 0;	/* terminate the connection in case the allocation failed */
		goto out;
	}

	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
		drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
	else
		memset(digest, 0, digest_size);

	/* Free peer_req and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, peer_req);
	peer_req = NULL;
	inc_rs_pending(mdev);
	ok = drbd_send_drequest_csum(mdev, sector, size,
				     digest, digest_size,
				     P_OV_REPLY);
	if (!ok)
		dec_rs_pending(mdev);
	kfree(digest);

out:
	if (peer_req)
		drbd_free_ee(mdev, peer_req);
	dec_unacked(mdev);
	return ok;
}

void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
		mdev->ov_last_oos_size += size>>9;
	} else {
		mdev->ov_last_oos_start = sector;
		mdev->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(mdev, sector, size);
}

int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	struct digest_info *di;
	void *digest;
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	}

	/* Free peer_req and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, peer_req);
	if (!eq)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	dec_unacked(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		ov_oos_print(mdev);
		drbd_resync_finished(mdev);
	}

	return ok;
}

int w_prev_work_done(struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);

	complete(&b->done);
	return 1;
}

int w_send_barrier(struct drbd_work *w, int cancel)
{
	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
	struct drbd_conf *mdev = w->mdev;
	struct p_barrier *p = &mdev->tconn->data.sbuf.barrier;
	int ok = 1;

	/* really avoid racing with tl_clear.  w.cb may have been referenced
	 * just before it was reassigned and re-queued, so double check that.
	 * actually, this race was harmless, since we only try to send the
	 * barrier packet here, and otherwise do nothing with the object.
	 * but compare with the head of w_clear_epoch */
	spin_lock_irq(&mdev->tconn->req_lock);
	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
		cancel = 1;
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (cancel)
		return 1;

	if (!drbd_get_data_sock(mdev->tconn))
		return 0;
	p->barrier = b->br_number;
	/* inc_ap_pending was done where this was queued.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in w_clear_epoch.  */
	ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
			    &p->head, sizeof(*p), 0);
	drbd_put_data_sock(mdev->tconn);

	return ok;
}

int w_send_write_hint(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	if (cancel)
		return 1;
	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}

int w_send_oos(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_oos(mdev, req);
	req_mod(req, OOS_HANDED_TO_NETWORK);

	return ok;
}

/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_dblock(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_dblock(mdev, req);
	req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

	return ok;
}

/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_read_req(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
				(unsigned long)req);

	if (!ok) {
		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
		 * so this is probably redundant */
		if (mdev->state.conn >= C_CONNECTED)
			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

	return ok;
}

int w_restart_disk_io(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;

	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_begin_io(mdev, req->i.sector);
	/* Calling drbd_al_begin_io() out of the worker might deadlock
	   theoretically. Practically it cannot deadlock, since this is
	   only used when unfreezing IOs. All the extents of the requests
	   that made it into the TL are already active */

	drbd_req_make_private_bio(req, req->master_bio);
	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
	generic_make_request(req->private_bio);

	return 1;
}

static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
	struct drbd_conf *odev = mdev;

	while (1) {
		if (odev->sync_conf.after == -1)
			return 1;
		odev = minor_to_mdev(odev->sync_conf.after);
		if (!expect(odev))
			return 1;
		if ((odev->state.conn >= C_SYNC_SOURCE &&
		     odev->state.conn <= C_PAUSED_SYNC_T) ||
		    odev->state.aftr_isp || odev->state.peer_isp ||
		    odev->state.user_isp)
			return 0;
	}
}

/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (!_drbd_may_sync_now(odev))
			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
			       != SS_NOTHING_TO_DO);
	}

	return rv;
}

/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (odev->state.aftr_isp) {
			if (_drbd_may_sync_now(odev))
				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
							CS_HARD, NULL)
				       != SS_NOTHING_TO_DO);
		}
	}
	return rv;
}

void resume_next_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_resume_next(mdev);
	write_unlock_irq(&global_state_lock);
}

void suspend_other_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_pause_after(mdev);
	write_unlock_irq(&global_state_lock);
}

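/* Validate a proposed sync-after dependency: the referenced minor must
 * exist, and following the chain of sync_conf.after links from it must
 * terminate without ever leading back to this device. */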
static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
	struct drbd_conf *odev;

	if (o_minor == -1)
		return NO_ERROR;
	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
		return ERR_SYNC_AFTER;

	/* check for loops */
	odev = minor_to_mdev(o_minor);
	while (1) {
		if (odev == mdev)
			return ERR_SYNC_AFTER_CYCLE;

		/* dependency chain ends here, no cycles. */
		if (odev->sync_conf.after == -1)
			return NO_ERROR;

		/* follow the dependency chain */
		odev = minor_to_mdev(odev->sync_conf.after);
	}
}

int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
	int changes;
	int retcode;

	write_lock_irq(&global_state_lock);
	retcode = sync_after_error(mdev, na);
	if (retcode == NO_ERROR) {
		mdev->sync_conf.after = na;
		do {
			changes  = _drbd_pause_after(mdev);
			changes |= _drbd_resume_next(mdev);
		} while (changes);
	}
	write_unlock_irq(&global_state_lock);
	return retcode;
}

void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	mdev->rs_in_flight = 0;
	mdev->rs_planed = 0;
	spin_lock(&mdev->peer_seq_lock);
	fifo_set(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
}

void start_resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
}

int w_start_resync(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;

	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
		dev_warn(DEV, "w_start_resync later...\n");
		mdev->start_resync_timer.expires = jiffies + HZ/10;
		add_timer(&mdev->start_resync_timer);
		return 1;
	}

	drbd_start_resync(mdev, C_SYNC_SOURCE);
	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
	return 1;
}

/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 *
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
 */
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
	union drbd_state ns;
	int r;

	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
		dev_err(DEV, "Resync already running!\n");
		return;
	}

	if (mdev->state.conn < C_AHEAD) {
		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
		drbd_rs_cancel_all(mdev);
		/* This should be done when we abort the resync. We definitely do not
		   want to have this for connections going back and forth between
		   Ahead/Behind and SyncSource/SyncTarget */
	}

	if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
		if (side == C_SYNC_TARGET) {
			/* Since application IO was locked out during C_WF_BITMAP_T and
			   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
			   we check whether we may make the data inconsistent. */
			r = drbd_khelper(mdev, "before-resync-target");
			r = (r >> 8) & 0xff;
			if (r > 0) {
				dev_info(DEV, "before-resync-target handler returned %d, "
					 "dropping connection.\n", r);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		} else /* C_SYNC_SOURCE */ {
			r = drbd_khelper(mdev, "before-resync-source");
			r = (r >> 8) & 0xff;
			if (r > 0) {
				if (r == 3) {
					dev_info(DEV, "before-resync-source handler returned %d, "
						 "ignoring. Old userland tools?", r);
				} else {
					dev_info(DEV, "before-resync-source handler returned %d, "
						 "dropping connection.\n", r);
					drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
					return;
				}
			}
		}
	}

	if (current == mdev->tconn->worker.task) {
		/* The worker should not sleep waiting for state_mutex,
		   that can take long */
		if (!mutex_trylock(mdev->state_mutex)) {
			set_bit(B_RS_H_DONE, &mdev->flags);
			mdev->start_resync_timer.expires = jiffies + HZ/5;
			add_timer(&mdev->start_resync_timer);
			return;
		}
	} else {
		mutex_lock(mdev->state_mutex);
	}
	clear_bit(B_RS_H_DONE, &mdev->flags);

	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
		mutex_unlock(mdev->state_mutex);
		return;
	}

	write_lock_irq(&global_state_lock);
	ns = mdev->state;

	ns.aftr_isp = !_drbd_may_sync_now(mdev);

	ns.conn = side;

	if (side == C_SYNC_TARGET)
		ns.disk = D_INCONSISTENT;
	else /* side == C_SYNC_SOURCE */
		ns.pdsk = D_INCONSISTENT;

	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;

	if (ns.conn < C_CONNECTED)
		r = SS_UNKNOWN_ERROR;

	if (r == SS_SUCCESS) {
		unsigned long tw = drbd_bm_total_weight(mdev);
		unsigned long now = jiffies;
		int i;

		mdev->rs_failed    = 0;
		mdev->rs_paused    = 0;
		mdev->rs_same_csum = 0;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->rs_total     = tw;
		mdev->rs_start     = now;
		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = tw;
			mdev->rs_mark_time[i] = now;
		}
		_drbd_pause_after(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (r == SS_SUCCESS) {
		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
		     drbd_conn_str(ns.conn),
		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
		     (unsigned long) mdev->rs_total);
		if (side == C_SYNC_TARGET)
			mdev->bm_resync_fo = 0;

		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
		 * with w_send_oos, or the sync target will get confused as to
		 * how much bits to resync.  We cannot do that always, because for an
		 * empty resync and protocol < 95, we need to do it here, as we call
		 * drbd_resync_finished from here in that case.
		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
		 * and from after_state_ch otherwise. */
		if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
			drbd_gen_and_send_sync_uuid(mdev);

		if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
			/* This still has a race (about when exactly the peers
			 * detect connection loss) that can lead to a full sync
			 * on next handshake. In 8.3.9 we fixed this with explicit
			 * resync-finished notifications, but the fix
			 * introduces a protocol change.  Sleeping for some
			 * time longer than the ping interval + timeout on the
			 * SyncSource, to give the SyncTarget the chance to
			 * detect connection loss, then waiting for a ping
			 * response (implicit in drbd_resync_finished) reduces
			 * the race considerably, but does not solve it. */
			if (side == C_SYNC_SOURCE)
				schedule_timeout_interruptible(
					mdev->tconn->net_conf->ping_int * HZ +
					mdev->tconn->net_conf->ping_timeo*HZ/9);
			drbd_resync_finished(mdev);
		}

		drbd_rs_controller_reset(mdev);
		/* ns.conn may already be != mdev->state.conn,
		 * we may have been paused in between, or become paused until
		 * the timer triggers.
		 * No matter, that is handled in resync_timer_fn() */
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);

		drbd_md_sync(mdev);
	}
	put_ldev(mdev);
	mutex_unlock(mdev->state_mutex);
}

static int _worker_dying(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;

	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
	/* _drbd_set_state only uses stop_nowait.
	 * wait here for the exiting receiver. */
	drbd_thread_stop(&mdev->tconn->receiver);
	drbd_mdev_cleanup(mdev);

	clear_bit(DEVICE_DYING, &mdev->flags);
	clear_bit(CONFIG_PENDING, &mdev->flags);
	wake_up(&mdev->state_wait);

	return 0;
}

int drbd_worker(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);
	int intr = 0;

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

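		/* No work queued right now: uncork the data socket so any
		 * batched packets go out, sleep until new work (or a signal)
		 * arrives, then cork it again to batch the next burst. */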
		if (down_trylock(&tconn->data.work.s)) {
			mutex_lock(&tconn->data.mutex);
			if (tconn->data.socket && !tconn->net_conf->no_cork)
				drbd_tcp_uncork(tconn->data.socket);
			mutex_unlock(&tconn->data.mutex);

			intr = down_interruptible(&tconn->data.work.s);

			mutex_lock(&tconn->data.mutex);
			if (tconn->data.socket  && !tconn->net_conf->no_cork)
				drbd_tcp_cork(tconn->data.socket);
			mutex_unlock(&tconn->data.mutex);
		}

		if (intr) {
			flush_signals(current);
			if (get_t_state(thi) == RUNNING) {
				conn_warn(tconn, "Worker got an unexpected signal\n");
				continue;
			}
			break;
		}

		if (get_t_state(thi) != RUNNING)
			break;
		/* With this break, we have done a down() but not consumed
		   the entry from the list. The cleanup code takes care of
		   this...   */

		w = NULL;
		spin_lock_irq(&tconn->data.work.q_lock);
		if (list_empty(&tconn->data.work.q)) {
			/* something terribly wrong in our logic.
			 * we were able to down() the semaphore,
			 * but the list is empty... doh.
			 *
			 * what is the best thing to do now?
			 * try again from scratch, restarting the receiver,
			 * asender, whatnot? could break even more ugly,
			 * e.g. when we are primary, but no good local data.
			 *
			 * I'll try to get away just starting over this loop.
			 */
			conn_warn(tconn, "Work list unexpectedly empty\n");
			spin_unlock_irq(&tconn->data.work.q_lock);
			continue;
		}
		w = list_entry(tconn->data.work.q.next, struct drbd_work, list);
		list_del_init(&w->list);
		spin_unlock_irq(&tconn->data.work.q_lock);

		if (!w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS)) {
			/* dev_warn(DEV, "worker: a callback failed! \n"); */
			if (tconn->cstate >= C_WF_REPORT_PARAMS)
				conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
		}
	}

	spin_lock_irq(&tconn->data.work.q_lock);
	while (!list_empty(&tconn->data.work.q)) {
		list_splice_init(&tconn->data.work.q, &work_list);
		spin_unlock_irq(&tconn->data.work.q_lock);

		while (!list_empty(&work_list)) {
			w = list_entry(work_list.next, struct drbd_work, list);
			list_del_init(&w->list);
			w->cb(w, 1);
		}

		spin_lock_irq(&tconn->data.work.q_lock);
	}
	sema_init(&tconn->data.work.s, 0);
	/* DANGEROUS race: if someone did queue his work within the spinlock,
	 * but up() ed outside the spinlock, we could get an up() on the
	 * semaphore without corresponding list entry.
	 * So don't do that.
	 */
	spin_unlock_irq(&tconn->data.work.q_lock);

	idr_for_each(&tconn->volumes, _worker_dying, NULL);

	return 0;
}