/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel);



/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_endio_pri (defined here)
 *   drbd_endio_sec (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */


/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync after dependencies, we grab a write lock, because
   we need stable states on all devices for that.  */
rwlock_t global_state_lock;

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;

	md_io = (struct drbd_md_io *)bio->bi_private;
	md_io->error = error;

	complete(&md_io->event);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->mdev;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->mdev;
	sector_t e_sector;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	e_sector = peer_req->i.sector;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->writ_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&peer_req->w.list, &mdev->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * Removed from the tree from "drbd_process_done_ee" within the
	 * appropriate w.cb (e_end_block/e_end_resync_block) or from
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(mdev, e_sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, e_sector);

	wake_asender(mdev);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_conf *mdev = peer_req->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)peer_req->i.sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)peer_req->i.sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? WRITE_COMPLETED_WITH_ERROR
			: (bio_rw(bio) == READ)
			  ? READ_COMPLETED_WITH_ERROR
			  : READ_AHEAD_COMPLETED_WITH_ERROR;
	} else
		what = COMPLETED_OK;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}

int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	/* We should not detach for read io-error,
	 * but try to WRITE the P_DATA_REPLY to the failed location,
	 * to give the disk the chance to relocate that block */

	spin_lock_irq(&mdev->tconn->req_lock);
	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
		_req_mod(req, READ_RETRY_REMOTE_CANCELED);
		spin_unlock_irq(&mdev->tconn->req_lock);
		return 1;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	return w_send_read_req(mdev, w, 0);
}

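/* Compute a digest over all pages of a peer request; every page but the
 * last one is fully used, the last one may be only partially covered. */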
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
		  struct drbd_peer_request *peer_req, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = peer_req->i.size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}

void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	__bio_for_each_segment(bvec, bio, i, 0) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}

/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
		/* Free e and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
		drbd_free_ee(mdev, peer_req);
		peer_req = NULL;
		inc_rs_pending(mdev);
		ok = drbd_send_drequest_csum(mdev, sector, size,
					     digest, digest_size,
					     P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		ok = 0;
	}

out:
	if (peer_req)
		drbd_free_ee(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return ok;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

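/* Schedule a local read of the given area so that its checksum can be sent
 * to the peer (checksum based resync).  Returns 0 on success, -EIO if the
 * local disk is gone, and -EAGAIN if the request should be deferred and
 * retried later. */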
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_peer_request *peer_req;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, peer_req);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(mdev, w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(mdev, w, cancel);
		break;
	}

	return 1;
}

void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	if (list_empty(&mdev->resync_work.list))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
}

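/* Helpers for the plan-ahead FIFO (rs_plan_s) used by the resync rate
 * controller below: fifo_set() initializes every slot, fifo_push() returns
 * the oldest entry while inserting a new one, and fifo_add_val() adds a
 * correction value to all planned steps. */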
static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}

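/* Feedback controller for the dynamic resync rate.  Based on the number of
 * sectors that came in since the last invocation (rs_sect_in), plan how many
 * sectors to request during the next SLEEP_TIME interval so that the amount
 * of resync data in flight approaches c_fill_target (or the amount implied
 * by c_delay_target), never exceeding c_max_rate.  Returns the number of
 * sectors to request in this turn. */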
static int drbd_rs_controller(struct drbd_conf *mdev)
{
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in the proxy*/
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - mdev->rs_planed;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(&mdev->rs_plan_s, cps);
	mdev->rs_planed += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
	mdev->rs_planed -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}

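/* Convert the controller output (or the configured static resync rate) into
 * the number of BM_BLOCK_SIZE sized requests to generate during the next
 * SLEEP_TIME interval; also records the currently effective rate in
 * c_sync_rate. */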
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
	int number;
	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = mdev->sync_conf.rate;
		number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
	}

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to incoming reply rate soon enough anyways. */
	return number;
}

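/* Worker callback: generate up to drbd_rs_number_requests() resync requests
 * from the resync bitmap, merging adjacent dirty bits into larger aligned
 * requests where possible, then re-arm the resync timer. */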
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel)
{
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 1;

	if (mdev->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(mdev);
		return 1;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync a
		   get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
		   to continue resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 1;
	}

	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
	number = drbd_rs_number_requests(mdev);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests, when half of the send buffer is filled */
		mutex_lock(&mdev->tconn->data.mutex);
		if (mdev->tconn->data.socket) {
			queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->tconn->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			put_ldev(mdev);
			return 1;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->tconn->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return 0;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			inc_rs_pending(mdev);
			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
					       sector, size, ID_SYNCER)) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return 0;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(mdev);
		return 1;
	}

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 1;
}

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(mdev);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity) {
			return 1;
		}

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (!drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}


void start_resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
}

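/* Deferred start of a resync as sync source: wait until pending acks and
 * resync replies have drained (retrying every 100ms via the
 * start_resync_timer), then call drbd_start_resync(). */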
int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
		dev_warn(DEV, "w_start_resync later...\n");
		mdev->start_resync_timer.expires = jiffies + HZ/10;
		add_timer(&mdev->start_resync_timer);
		return 1;
	}

	drbd_start_resync(mdev, C_SYNC_SOURCE);
	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
	return 1;
}

int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);
	ov_oos_print(mdev);
	drbd_resync_finished(mdev);

	return 1;
}

static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);

	drbd_resync_finished(mdev);

	return 1;
}

static void ping_peer(struct drbd_conf *mdev)
{
	clear_bit(GOT_PING_ACK, &mdev->flags);
	request_ping(mdev);
	wait_event(mdev->misc_wait,
		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_conf *mdev)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(mdev)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * is not finished by now).   Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			drbd_queue_work(&mdev->tconn->data.work, w);
			return 1;
		}
		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;
	db = mdev->rs_total;
	dbdt = Bit2KB(db/dt);
	mdev->rs_paused /= HZ;

	if (!get_ldev(mdev))
		goto out;

	ping_peer(mdev);

	spin_lock_irq(&mdev->tconn->req_lock);
	os = mdev->state;

	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     verify_done ? "Online verify " : "Resync",
	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(mdev);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - mdev->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (mdev->csums_tfm && mdev->rs_total) {
			const unsigned long s = mdev->rs_same_csum;
			const unsigned long t = mdev->rs_total;
			const int ratio =
				(t == 0)     ? 0 :
			(t < 100000) ? ((s*100)/t) : (s/(t/100));
			dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total));
		}
	}

	if (mdev->rs_failed) {
		dev_info(DEV, "            %lu failed blocks\n", mdev->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (mdev->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
			}
		}

		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
			/* for verify runs, we don't update uuids here,
			 * so there would be nothing to report. */
			drbd_uuid_set_bm(mdev, 0UL);
			drbd_print_uuids(mdev, "updated UUIDs");
			if (mdev->p_uuid) {
				/* Now the two UUID sets are equal, update what we
				 * know of the peer. */
				int i;
				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
					mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
			}
		}
	}

	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&mdev->tconn->req_lock);
	put_ldev(mdev);
out:
	mdev->rs_total  = 0;
	mdev->rs_failed = 0;
	mdev->rs_paused = 0;
	if (verify_done)
		mdev->ov_start_sector = 0;

	drbd_md_sync(mdev);

	if (khelper_cmd)
		drbd_khelper(mdev, khelper_cmd);

	return 1;
}

/* helper: if any page of the peer request may still be referenced by the
 * network stack (sendpage() not yet finished), park it on the net_ee list
 * instead of freeing it right away */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
	if (drbd_ee_has_active_page(peer_req)) {
		/* This might happen if sendpage() has not finished */
		int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
		atomic_add(i, &mdev->pp_in_use_by_net);
		atomic_sub(i, &mdev->pp_in_use);
		spin_lock_irq(&mdev->tconn->req_lock);
		list_add_tail(&peer_req->w.list, &mdev->net_ee);
		spin_unlock_irq(&mdev->tconn->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_ee(mdev, peer_req);
}

/**
 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)peer_req->i.sector);

		ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}

/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	if (mdev->state.conn == C_AHEAD) {
		ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(mdev);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			ok = 1;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)peer_req->i.sector);

		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);

		/* update resync data with failure */
		drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}

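/* Worker callback for a checksum based resync request (P_CSUM_RS_REQUEST):
 * compare the digest received from the peer with the locally computed one;
 * if they match, acknowledge the block as in sync, otherwise send the whole
 * block back in a P_RS_DATA_REPLY. */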
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (mdev->csums_tfm) {
			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
		} else {
			inc_rs_pending(mdev);
			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
			kfree(di);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return ok;
}

/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (!digest) {
		ok = 0;	/* terminate the connection in case the allocation failed */
		goto out;
	}

	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
		drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
	else
		memset(digest, 0, digest_size);

	/* Free e and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, peer_req);
	peer_req = NULL;
	inc_rs_pending(mdev);
	ok = drbd_send_drequest_csum(mdev, sector, size,
				     digest, digest_size,
				     P_OV_REPLY);
	if (!ok)
		dec_rs_pending(mdev);
	kfree(digest);

out:
	if (peer_req)
		drbd_free_ee(mdev, peer_req);
	dec_unacked(mdev);
	return ok;
}

void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
		mdev->ov_last_oos_size += size>>9;
	} else {
		mdev->ov_last_oos_start = sector;
		mdev->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(mdev, sector, size);
}

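/* Worker callback for an online verify reply (P_OV_REPLY): recompute the
 * local digest, compare it with the peer's, mark the block out of sync on
 * a mismatch, and report the result with a P_OV_RESULT ack. */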
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct digest_info *di;
	void *digest;
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	}

	/* Free e and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, peer_req);
	if (!eq)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	dec_unacked(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		ov_oos_print(mdev);
		drbd_resync_finished(mdev);
	}

	return ok;
}

int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
	complete(&b->done);
	return 1;
}

int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
	struct p_barrier *p = &mdev->tconn->data.sbuf.barrier;
	int ok = 1;

	/* really avoid racing with tl_clear.  w.cb may have been referenced
	 * just before it was reassigned and re-queued, so double check that.
	 * actually, this race was harmless, since we only try to send the
	 * barrier packet here, and otherwise do nothing with the object.
	 * but compare with the head of w_clear_epoch */
	spin_lock_irq(&mdev->tconn->req_lock);
	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
		cancel = 1;
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (cancel)
		return 1;

	if (!drbd_get_data_sock(mdev))
		return 0;
	p->barrier = b->br_number;
	/* inc_ap_pending was done where this was queued.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in w_clear_epoch.  */
	ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
			    &p->head, sizeof(*p), 0);
	drbd_put_data_sock(mdev);

	return ok;
}

int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (cancel)
		return 1;
	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}

int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_oos(mdev, req);
	req_mod(req, OOS_HANDED_TO_NETWORK);

	return ok;
}

/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_dblock(mdev, req);
	req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

	return ok;
}

/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
				(unsigned long)req);

	if (!ok) {
		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
		 * so this is probably redundant */
		if (mdev->state.conn >= C_CONNECTED)
			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

	return ok;
}

int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_begin_io(mdev, req->i.sector);
	/* Calling drbd_al_begin_io() out of the worker might deadlock
	   theoretically. Practically it cannot deadlock, since this is
	   only used when unfreezing IOs. All the extents of the requests
	   that made it into the TL are already active */

	drbd_req_make_private_bio(req, req->master_bio);
	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
	generic_make_request(req->private_bio);

	return 1;
}

static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
	struct drbd_conf *odev = mdev;

	while (1) {
		if (odev->sync_conf.after == -1)
			return 1;
		odev = minor_to_mdev(odev->sync_conf.after);
		if (!expect(odev))
			return 1;
		if ((odev->state.conn >= C_SYNC_SOURCE &&
		     odev->state.conn <= C_PAUSED_SYNC_T) ||
		    odev->state.aftr_isp || odev->state.peer_isp ||
		    odev->state.user_isp)
			return 0;
	}
}

/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (!_drbd_may_sync_now(odev))
			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
			       != SS_NOTHING_TO_DO);
	}

	return rv;
}

/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (odev->state.aftr_isp) {
			if (_drbd_may_sync_now(odev))
				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
							CS_HARD, NULL)
				       != SS_NOTHING_TO_DO) ;
		}
	}
	return rv;
}

void resume_next_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_resume_next(mdev);
	write_unlock_irq(&global_state_lock);
}

void suspend_other_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_pause_after(mdev);
	write_unlock_irq(&global_state_lock);
}

static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
	struct drbd_conf *odev;

	if (o_minor == -1)
		return NO_ERROR;
	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
		return ERR_SYNC_AFTER;

	/* check for loops */
	odev = minor_to_mdev(o_minor);
	while (1) {
		if (odev == mdev)
			return ERR_SYNC_AFTER_CYCLE;

		/* dependency chain ends here, no cycles. */
		if (odev->sync_conf.after == -1)
			return NO_ERROR;

		/* follow the dependency chain */
		odev = minor_to_mdev(odev->sync_conf.after);
	}
}

int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
	int changes;
	int retcode;

	write_lock_irq(&global_state_lock);
	retcode = sync_after_error(mdev, na);
	if (retcode == NO_ERROR) {
		mdev->sync_conf.after = na;
		do {
			changes  = _drbd_pause_after(mdev);
			changes |= _drbd_resume_next(mdev);
		} while (changes);
	}
	write_unlock_irq(&global_state_lock);
	return retcode;
}

void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	mdev->rs_in_flight = 0;
	mdev->rs_planed = 0;
	spin_lock(&mdev->peer_seq_lock);
	fifo_set(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
}

/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 *
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
 */
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
	union drbd_state ns;
	int r;

	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
		dev_err(DEV, "Resync already running!\n");
		return;
	}

	if (mdev->state.conn < C_AHEAD) {
		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
		drbd_rs_cancel_all(mdev);
		/* This should be done when we abort the resync. We definitely do not
		   want to have this for connections going back and forth between
		   Ahead/Behind and SyncSource/SyncTarget */
	}

	if (side == C_SYNC_TARGET) {
		/* Since application IO was locked out during C_WF_BITMAP_T and
		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
		   we check that we might make the data inconsistent. */
		r = drbd_khelper(mdev, "before-resync-target");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			dev_info(DEV, "before-resync-target handler returned %d, "
			     "dropping connection.\n", r);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			return;
		}
	} else /* C_SYNC_SOURCE */ {
		r = drbd_khelper(mdev, "before-resync-source");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			if (r == 3) {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "ignoring. Old userland tools?", r);
			} else {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "dropping connection.\n", r);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		}
	}

	drbd_state_lock(mdev);

	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
		drbd_state_unlock(mdev);
		return;
	}

	write_lock_irq(&global_state_lock);
	ns = mdev->state;

	ns.aftr_isp = !_drbd_may_sync_now(mdev);

	ns.conn = side;

	if (side == C_SYNC_TARGET)
		ns.disk = D_INCONSISTENT;
	else /* side == C_SYNC_SOURCE */
		ns.pdsk = D_INCONSISTENT;

	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;

	if (ns.conn < C_CONNECTED)
		r = SS_UNKNOWN_ERROR;

	if (r == SS_SUCCESS) {
		unsigned long tw = drbd_bm_total_weight(mdev);
		unsigned long now = jiffies;
		int i;

		mdev->rs_failed    = 0;
		mdev->rs_paused    = 0;
		mdev->rs_same_csum = 0;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->rs_total     = tw;
		mdev->rs_start     = now;
		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = tw;
			mdev->rs_mark_time[i] = now;
		}
		_drbd_pause_after(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (r == SS_SUCCESS) {
		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
		     drbd_conn_str(ns.conn),
		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
		     (unsigned long) mdev->rs_total);
		if (side == C_SYNC_TARGET)
			mdev->bm_resync_fo = 0;

		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
		 * with w_send_oos, or the sync target will get confused as to
		 * how much bits to resync.  We cannot do that always, because for an
		 * empty resync and protocol < 95, we need to do it here, as we call
		 * drbd_resync_finished from here in that case.
		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
		 * and from after_state_ch otherwise. */
		if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
			drbd_gen_and_send_sync_uuid(mdev);

		if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
			/* This still has a race (about when exactly the peers
			 * detect connection loss) that can lead to a full sync
			 * on next handshake. In 8.3.9 we fixed this with explicit
			 * resync-finished notifications, but the fix
			 * introduces a protocol change.  Sleeping for some
			 * time longer than the ping interval + timeout on the
			 * SyncSource, to give the SyncTarget the chance to
			 * detect connection loss, then waiting for a ping
			 * response (implicit in drbd_resync_finished) reduces
			 * the race considerably, but does not solve it. */
			if (side == C_SYNC_SOURCE)
				schedule_timeout_interruptible(
					mdev->tconn->net_conf->ping_int * HZ +
					mdev->tconn->net_conf->ping_timeo*HZ/9);
			drbd_resync_finished(mdev);
		}

		drbd_rs_controller_reset(mdev);
		/* ns.conn may already be != mdev->state.conn,
		 * we may have been paused in between, or become paused until
		 * the timer triggers.
		 * No matter, that is handled in resync_timer_fn() */
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);

		drbd_md_sync(mdev);
	}
	put_ldev(mdev);
	drbd_state_unlock(mdev);
}

int drbd_worker(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);
	int intr = 0, i;

	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(mdev);

		if (down_trylock(&mdev->tconn->data.work.s)) {
			mutex_lock(&mdev->tconn->data.mutex);
			if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork)
				drbd_tcp_uncork(mdev->tconn->data.socket);
			mutex_unlock(&mdev->tconn->data.mutex);

			intr = down_interruptible(&mdev->tconn->data.work.s);

			mutex_lock(&mdev->tconn->data.mutex);
			if (mdev->tconn->data.socket  && !mdev->tconn->net_conf->no_cork)
				drbd_tcp_cork(mdev->tconn->data.socket);
			mutex_unlock(&mdev->tconn->data.mutex);
		}

		if (intr) {
			D_ASSERT(intr == -EINTR);
			flush_signals(current);
			if (!expect(get_t_state(thi) != RUNNING))
				continue;
			break;
		}

		if (get_t_state(thi) != RUNNING)
			break;
		/* With this break, we have done a down() but not consumed
		   the entry from the list. The cleanup code takes care of
		   this...   */

		w = NULL;
		spin_lock_irq(&mdev->tconn->data.work.q_lock);
		if (!expect(!list_empty(&mdev->tconn->data.work.q))) {
			/* something terribly wrong in our logic.
			 * we were able to down() the semaphore,
			 * but the list is empty... doh.
			 *
			 * what is the best thing to do now?
			 * try again from scratch, restarting the receiver,
			 * asender, whatnot? could break even more ugly,
			 * e.g. when we are primary, but no good local data.
			 *
			 * I'll try to get away just starting over this loop.
			 */
			spin_unlock_irq(&mdev->tconn->data.work.q_lock);
			continue;
		}
		w = list_entry(mdev->tconn->data.work.q.next, struct drbd_work, list);
		list_del_init(&w->list);
		spin_unlock_irq(&mdev->tconn->data.work.q_lock);

		if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
			/* dev_warn(DEV, "worker: a callback failed! \n"); */
			if (mdev->state.conn >= C_CONNECTED)
				drbd_force_state(mdev,
						NS(conn, C_NETWORK_FAILURE));
		}
	}
	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));

	spin_lock_irq(&mdev->tconn->data.work.q_lock);
	i = 0;
	while (!list_empty(&mdev->tconn->data.work.q)) {
		list_splice_init(&mdev->tconn->data.work.q, &work_list);
		spin_unlock_irq(&mdev->tconn->data.work.q_lock);

		while (!list_empty(&work_list)) {
			w = list_entry(work_list.next, struct drbd_work, list);
			list_del_init(&w->list);
			w->cb(mdev, w, 1);
			i++; /* dead debugging code */
		}

		spin_lock_irq(&mdev->tconn->data.work.q_lock);
	}
	sema_init(&mdev->tconn->data.work.s, 0);
	/* DANGEROUS race: if someone did queue his work within the spinlock,
	 * but up() ed outside the spinlock, we could get an up() on the
	 * semaphore without corresponding list entry.
	 * So don't do that.
	 */
	spin_unlock_irq(&mdev->tconn->data.work.q_lock);

	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
	/* _drbd_set_state only uses stop_nowait.
	 * wait here for the exiting receiver. */
	drbd_thread_stop(&mdev->tconn->receiver);
	drbd_mdev_cleanup(mdev);

	dev_info(DEV, "worker terminated\n");

	clear_bit(DEVICE_DYING, &mdev->flags);
	clear_bit(CONFIG_PENDING, &mdev->flags);
	wake_up(&mdev->state_wait);

	return 0;
}