drbd_receiver.c 155.4 KB
Newer Older
P
Philipp Reisner 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
47
#include "drbd_protocol.h"
P
Philipp Reisner 已提交
48 49 50 51
#include "drbd_req.h"

#include "drbd_vli.h"

52 53
/* Decoded fields of a received packet header (filled in by decode_header). */
struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;	/* presumably the volume number — TODO confirm against decode_header */
	void *data;
};

P
Philipp Reisner 已提交
59 60 61 62 63 64
/* Possible outcomes of drbd_may_finish_epoch(). */
enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

65 66
static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
67
static int drbd_disconnected(struct drbd_peer_device *);
P
Philipp Reisner 已提交
68

69
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
70
static int e_end_block(struct drbd_work *, int);
P
Philipp Reisner 已提交
71 72 73 74


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92
/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;
93 94 95 96

	if (!page)
		return NULL;

97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153
	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

/* Return every page of the chain to the system.
 * Returns the number of pages freed. */
static int page_chain_free(struct page *page)
{
	struct page *next;
	int freed = 0;

	page_chain_for_each_safe(page, next) {
		put_page(page);
		freed++;
	}
	return freed;
}

/* Prepend the chain chain_first..chain_last to *head.
 * chain_last must be the actual tail of chain_first's chain. */
static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	/* paranoia: verify the claimed tail really is the tail */
	struct page *tail = page_chain_tail(chain_first, NULL);
	BUG_ON(tail != chain_last);
#endif

	/* link the old head behind the new chain, then swing the head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

154
/* Try to grab a chain of @number pages for @device, without sleeping.
 *
 * First attempts to take @number pages off the global drbd_pp_pool;
 * otherwise falls back to allocating individual pages with GFP_TRY and
 * linking them via page->private.  All-or-nothing: if the kernel
 * allocation comes up short, the pages obtained so far are donated to
 * the global pool and NULL is returned; drbd_alloc_pages() retries. */
static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

200
static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
201
					   struct list_head *to_be_freed)
P
Philipp Reisner 已提交
202
{
203
	struct drbd_peer_request *peer_req, *tmp;
P
Philipp Reisner 已提交
204 205 206 207 208 209

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

210
	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
211
		if (drbd_peer_req_has_active_page(peer_req))
P
Philipp Reisner 已提交
212
			break;
213
		list_move(&peer_req->w.list, to_be_freed);
P
Philipp Reisner 已提交
214 215 216
	}
}

217
static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
P
Philipp Reisner 已提交
218 219
{
	LIST_HEAD(reclaimed);
220
	struct drbd_peer_request *peer_req, *t;
P
Philipp Reisner 已提交
221

222
	spin_lock_irq(&device->resource->req_lock);
223
	reclaim_finished_net_peer_reqs(device, &reclaimed);
224
	spin_unlock_irq(&device->resource->req_lock);
P
Philipp Reisner 已提交
225

226
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
227
		drbd_free_net_peer_req(device, peer_req);
P
Philipp Reisner 已提交
228 229 230
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device: DRBD peer device (the accounting is done on its device).
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	int mxb;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		/* give back no-longer-referenced net_ee pages first */
		drbd_kick_lo_and_reclaim_net(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

		/* woken by drbd_free_pages() via drbd_pp_wait */
		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}

289
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside an other spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system.
 * @is_net selects which in-use counter the pages were accounted on. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

	/* pool already large enough: give the pages back to the system */
	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	/* either branch leaves i == number of pages handed back */
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	/* wake sleepers in drbd_alloc_pages() */
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
323
 drbd_free_peer_req()
324
 drbd_alloc_peer_req()
325
 drbd_free_peer_reqs()
P
Philipp Reisner 已提交
326
 drbd_ee_fix_bhs()
327
 drbd_finish_peer_reqs()
P
Philipp Reisner 已提交
328 329 330 331
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

332
/* Allocate and initialize a peer request for @data_size bytes at @sector.
 *
 * The request comes from drbd_ee_mempool; its data pages (if any) come
 * from drbd_alloc_pages(), retrying only if @gfp_mask allows waiting.
 * Returns NULL on allocation failure or injected DRBD_FAULT_AL_EE fault.
 * The caller owns the returned request and must release it with
 * drbd_free_peer_req(). */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (data_size) {
		page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT));
		if (!page)
			goto fail;
	}

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

381
/* Release everything owned by @peer_req and return it to the mempool.
 * @is_net selects which pp_in_use counter the pages are accounted on.
 * The request must have no pending bios and an empty interval. */
void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

392
int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
P
Philipp Reisner 已提交
393 394
{
	LIST_HEAD(work_list);
395
	struct drbd_peer_request *peer_req, *t;
P
Philipp Reisner 已提交
396
	int count = 0;
397
	int is_net = list == &device->net_ee;
P
Philipp Reisner 已提交
398

399
	spin_lock_irq(&device->resource->req_lock);
P
Philipp Reisner 已提交
400
	list_splice_init(list, &work_list);
401
	spin_unlock_irq(&device->resource->req_lock);
P
Philipp Reisner 已提交
402

403
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
404
		__drbd_free_peer_req(device, peer_req, is_net);
P
Philipp Reisner 已提交
405 406 407 408 409 410
		count++;
	}
	return count;
}

/*
 * Run the completion callbacks of all done_ee requests and free them.
 * Also reclaims finished net_ee entries while holding the lock anyway.
 * Returns the first non-zero callback result; later callbacks still run
 * and receive !!err as their (cancel) argument.
 *
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	/* wake waiters in _drbd_wait_ee_list_empty() */
	wake_up(&device->ee_wait);

	return err;
}

446
/* Wait until @head is empty.  Must be called with resource->req_lock
 * held; the lock is dropped around the sleep and re-acquired before
 * re-checking, so the list test is always made under the lock. */
static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

462
/* Like _drbd_wait_ee_list_empty(), but acquires and releases the
 * req_lock itself. */
static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}

470
/* Receive up to @size bytes from @sock into the kernel buffer @buf.
 * With @flags == 0, MSG_WAITALL | MSG_NOSIGNAL is used, i.e. block
 * until the full amount arrives.  Returns bytes received or a negative
 * error code from sock_recvmsg().
 * NOTE(review): relies on the legacy set_fs(KERNEL_DS) idiom to hand a
 * kernel pointer to sock_recvmsg(); matches the kernel API this file
 * targets (struct msghdr still has msg_iov/msg_iovlen). */
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

492
/* Receive @size bytes on the data socket, logging unexpected failures.
 * On a short read/error/EOF the connection state is moved to
 * C_BROKEN_PIPE -- except for an EOF right after we ourselves sent a
 * disconnect, where we first wait for the state machine to leave
 * C_WF_REPORT_PARAMS.  Returns bytes received or a negative error. */
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			/* expected EOF during teardown: skip BROKEN_PIPE */
			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

525
/* Receive exactly @size bytes or fail.
 * Returns 0 on success, the negative receive error, or -EIO on a short
 * (including zero-length) read. */
static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int got = drbd_recv(connection, buf, size);

	if (got == size)
		return 0;
	return got < 0 ? got : -EIO;
}

538
/* drbd_recv_all(), but warn about failures that were not caused by a
 * pending signal. */
static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err = drbd_recv_all(connection, buf, size);

	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566
/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

567
/* Actively try to establish the outgoing TCP connection to the peer.
 *
 * Reads buffer sizes and connect_int from net_conf under RCU, binds the
 * socket to the configured local address (port 0, kernel picks one) and
 * connects to the peer address.  Returns the connected socket or NULL.
 * "Expected" connect failures (timeout, unreachable peer, signal) leave
 * the connection in C_WF_CONNECTION so the caller can retry; anything
 * else forces C_DISCONNECTING. */
static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 *  for the outgoing connections.
	 *  This is needed for multihomed hosts and to be
	 *  able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 *  a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

655
/* Context attached to the listen socket's sk_user_data so that the
 * sk_state_change hook (drbd_incoming_connection) can wake the waiter
 * in drbd_wait_for_connect(). */
struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;	/* completed on TCP_ESTABLISHED */
	void (*original_sk_state_change)(struct sock *sk);	/* saved callback */

};

663
static void drbd_incoming_connection(struct sock *sk)
664 665
{
	struct accept_wait_data *ad = sk->sk_user_data;
666
	void (*state_change)(struct sock *sk);
667

668 669 670 671
	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
672 673
}

674
/* Create, configure, bind and start the listen socket for incoming DRBD
 * connections, hooking its sk_state_change so drbd_wait_for_connect()
 * is notified of new connections via @ad->door_bell.
 * Returns 0 on success, -EIO on any failure (after logging and forcing
 * C_DISCONNECTING for unexpected errors). */
static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	/* install our state-change hook under the callback lock */
	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

737
/* Undo the sk_state_change/sk_user_data hookup done by
 * prepare_listen_socket() for @sk. */
static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

745
/* Wait up to connect_int (with random jitter) for an incoming
 * connection on @ad->s_listen and accept it.
 * Returns the established socket, or NULL on timeout, signal, missing
 * net_conf, or accept failure. */
static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	/* the door bell is rung by drbd_incoming_connection() */
	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

782
static int decode_header(struct drbd_connection *, void *, struct packet_info *);
P
Philipp Reisner 已提交
783

784
/* Send the initial handshake packet (P_INITIAL_DATA or P_INITIAL_META,
 * no payload) on @sock.  Returns 0 on success or a negative error. */
static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

792
static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
P
Philipp Reisner 已提交
793
{
794
	unsigned int header_size = drbd_header_size(connection);
795 796
	struct packet_info pi;
	int err;
P
Philipp Reisner 已提交
797

798
	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
799 800 801 802 803
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
804
	err = decode_header(connection, connection->data.rbuf, &pi);
805 806 807
	if (err)
		return err;
	return pi.cmd;
P
Philipp Reisner 已提交
808 809 810 811 812 813
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
814
static int drbd_socket_okay(struct socket **sock)
P
Philipp Reisner 已提交
815 816 817 818 819
{
	int rr;
	char tb[4];

	if (!*sock)
820
		return false;
P
Philipp Reisner 已提交
821

822
	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
P
Philipp Reisner 已提交
823 824

	if (rr > 0 || rr == -EAGAIN) {
825
		return true;
P
Philipp Reisner 已提交
826 827 828
	} else {
		sock_release(*sock);
		*sock = NULL;
829
		return false;
P
Philipp Reisner 已提交
830 831
	}
}
832 833
/* Gets called if a connection is established, or if a new minor gets created
   in a connection.
   Resets per-device sequence counters, selects the state mutex based on
   the agreed protocol version, and sends our sync parameters, sizes,
   UUIDs and current state to the peer (stopping at the first error).
   Returns 0 on success or the first send error. */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	/* protocol < 100: all devices of a connection share one state mutex */
	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}
P
Philipp Reisner 已提交
859 860 861 862 863 864 865 866 867

/*
 * Establish both sockets (data and meta) of a connection, negotiate the
 * protocol features, optionally authenticate, and kick off the per-volume
 * handshake for every configured device.
 *
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_connection *connection)
{
	struct drbd_socket sock, msock;
	struct drbd_peer_device *peer_device;
	struct net_conf *nc;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.connection = connection,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &connection->flags);
	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = connection->data.sbuf;
	sock.rbuf = connection->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = connection->meta.sbuf;
	msock.rbuf = connection->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	connection->agreed_pro_version = 80;

	if (prepare_listen_socket(connection, &ad))
		return 0;

	/* Both nodes connect actively AND accept passively; whichever
	 * succeeds first determines which end carries data vs meta. */
	do {
		struct socket *s;

		s = drbd_try_connect(connection);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(connection, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
				msock.socket = s;
				send_first_packet(connection, &msock, P_INITIAL_META);
			} else {
				drbd_err(connection, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock.socket && msock.socket) {
			rcu_read_lock();
			nc = rcu_dereference(connection->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			rcu_read_unlock();
			/* Give the peer a chance to cross-connect too, then
			 * re-verify both sockets are still alive. */
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(connection, &ad);
		if (s) {
			int fp = receive_first_packet(connection, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					/* Both sides connected simultaneously:
					 * keep the accepted one, drop ours. */
					drbd_warn(connection, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &connection->flags);
				if (msock.socket) {
					drbd_warn(connection, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				drbd_warn(connection, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				/* Break connect/accept symmetry with a coin
				 * flip, otherwise both nodes could livelock. */
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (connection->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&connection->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	/* GFP_NOIO: socket allocations must not recurse into writeout. */
	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	connection->data.socket = sock.socket;
	connection->meta.socket = msock.socket;
	connection->last_received = jiffies;

	h = drbd_do_features(connection);
	if (h <= 0)
		return h;

	if (connection->cram_hmac_tfm) {
		/* drbd_request_state(device, NS(conn, WFAuth)); */
		switch (drbd_do_auth(connection)) {
		case -1:
			drbd_err(connection, "Authentication of peer failed\n");
			return -1;
		case 0:
			drbd_err(connection, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	connection->data.socket->sk->sk_sndtimeo = timeout;
	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
		return -1;

	set_bit(STATE_SENT, &connection->flags);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		/* Pin the device; we must leave the RCU read side section
		 * before the blocking per-volume handshake below. */
		kref_get(&device->kref);
		rcu_read_unlock();

		/* Prevent a race between resync-handshake and
		 * being promoted to Primary.
		 *
		 * Grab and release the state mutex, so we know that any current
		 * drbd_set_role() is finished, and any incoming drbd_set_role
		 * will see the STATE_SENT flag, and wait for it to be cleared.
		 */
		mutex_lock(device->state_mutex);
		mutex_unlock(device->state_mutex);

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &device->flags);
		else
			clear_bit(DISCARD_MY_DATA, &device->flags);

		drbd_connected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &connection->flags);
		return 0;
	}

	drbd_thread_start(&connection->asender);

	mutex_lock(&connection->resource->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	connection->net_conf->discard_my_data = 0;
	mutex_unlock(&connection->resource->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

1096
/* Decode one packet header into @pi.  The header layout (protocol 100,
 * 95 "big", or original 80) is implied by the agreed protocol version
 * via drbd_header_size(), and double-checked against the magic value.
 * Returns 0 on success, -EINVAL on bad magic or non-zero padding. */
static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		/* pre-9.0 header formats know nothing about volumes */
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	/* payload starts right after the header in the receive buffer */
	pi->data = header + header_size;
	return 0;
}
P
Philipp Reisner 已提交
1131

1132
static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
1133
{
1134
	void *buffer = connection->data.rbuf;
1135
	int err;
1136

1137
	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
1138
	if (err)
1139
		return err;
1140

1141 1142
	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;
P
Philipp Reisner 已提交
1143

1144
	return err;
P
Philipp Reisner 已提交
1145 1146
}

1147
/* Issue a cache flush to the backing device of every attached volume of
 * this connection, provided the current write ordering policy requires
 * flushes.  On flush failure the policy is downgraded to WO_drain_io. */
static void drbd_flush(struct drbd_connection *connection)
{
	int rv;
	struct drbd_peer_device *peer_device;
	int vnr;

	if (connection->write_ordering >= WO_bdev_flush) {
		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			/* Pin the device: we must drop the RCU read lock
			 * before the blocking flush below. */
			kref_get(&device->kref);
			rcu_read_unlock();

			rv = blkdev_issue_flush(device->ldev->backing_bdev,
					GFP_NOIO, NULL);
			if (rv) {
				drbd_info(device, "local disk flush failed with status %d\n", rv);
				/* would rather check on EOPNOTSUPP, but that is not reliable.
				 * don't try again for ANY return value != 0
				 * if (rv == -EOPNOTSUPP) */
				drbd_bump_write_ordering(connection, WO_drain_io);
			}
			put_ldev(device);
			kref_put(&device->kref, drbd_destroy_device);

			rcu_read_lock();
			/* A failed flush already downgraded the policy;
			 * no point in flushing the remaining volumes. */
			if (rv)
				break;
		}
		rcu_read_unlock();
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 *
 * Walks the epoch list starting at @epoch: an epoch is "finished" once it
 * is non-empty, has no active requests left, and its barrier number is
 * known (or we are cleaning up).  A finished non-current epoch is freed,
 * the current epoch is recycled in place.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				/* drbd_send_b_ack() may sleep; drop the
				 * spinlock around it. */
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				/* Finished epoch that is not current: unlink,
				 * free, and continue with its successor. */
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				/* The current epoch is reset instead of freed. */
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to an other write ordering method
 * @connection:	DRBD connection.
 * @wo:		Write ordering method to try.
 *
 * The effective method can only ever be lowered (flush -> drain -> none),
 * and is further constrained by the disk_flushes/disk_drain settings of
 * every attached volume.
 */
void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo)
{
	struct disk_conf *dc;
	struct drbd_peer_device *peer_device;
	enum write_ordering_e pwo;
	int vnr;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = connection->write_ordering;
	/* never upgrade beyond what is currently in effect */
	wo = min(pwo, wo);
	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		if (!get_ldev_if_state(device, D_ATTACHING))
			continue;
		dc = rcu_dereference(device->ldev->disk_conf);

		/* per-volume configuration may forbid flush and/or drain */
		if (wo == WO_bdev_flush && !dc->disk_flushes)
			wo = WO_drain_io;
		if (wo == WO_drain_io && !dc->disk_drain)
			wo = WO_none;
		put_ldev(device);
	}
	rcu_read_unlock();
	connection->write_ordering = wo;
	if (pwo != connection->write_ordering || wo == WO_bdev_flush)
		drbd_info(connection, "Method to ensure write ordering: %s\n", write_ordering_str[connection->write_ordering]);
}

1298
/**
 * drbd_submit_peer_request()
 * @device:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 * @fault_type:	fault injection class, see drbd_generic_make_request()
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;	/* singly linked via bi_next; built first, submitted later */
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;	/* bytes still to be mapped into bios */
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		drbd_err(device, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = device->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	/* prepend to the local chain; submission order does not matter */
	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				drbd_err(device,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (uint64_t)bio->bi_iter.bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			/* current bio is full; start another one at the
			 * current page/sector position */
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(device, page == NULL);
	D_ASSERT(device, ds == 0);

	/* the endio handler completes the peer request once this drops to 0 */
	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(device, fault_type, bio);
	} while (bios);
	return 0;

fail:
	/* none of the bios were submitted yet, so plain bio_put suffices */
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

1395
static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1396
					     struct drbd_peer_request *peer_req)
1397
{
1398
	struct drbd_interval *i = &peer_req->i;
1399

1400
	drbd_remove_interval(&device->write_requests, i);
1401 1402
	drbd_clear_interval(i);

A
Andreas Gruenbacher 已提交
1403
	/* Wake up any processes waiting for this peer request to complete.  */
1404
	if (i->waiting)
1405
		wake_up(&device->misc_wait);
1406 1407
}

1408
/* Block until no volume of this connection has peer requests left on its
 * active_ee list, i.e. all in-flight local writes on behalf of the peer
 * have completed. */
static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		/* Pin the device so it survives while we sleep outside
		 * the RCU read side section. */
		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_wait_ee_list_empty(device, &device->active_ee);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

1426 1427 1428 1429 1430 1431
/* Map a volume number received on the wire to the corresponding
 * peer_device; NULL if no such volume is configured. */
static struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	struct drbd_peer_device *peer_device;

	peer_device = idr_find(&connection->peer_devices, volume_number);
	return peer_device;
}

1432
static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
1433
{
1434
	int rv;
1435
	struct p_barrier *p = pi->data;
P
Philipp Reisner 已提交
1436 1437
	struct drbd_epoch *epoch;

1438 1439 1440
	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
1441 1442 1443
	connection->current_epoch->barrier_nr = p->barrier;
	connection->current_epoch->connection = connection;
	rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
P
Philipp Reisner 已提交
1444 1445 1446 1447 1448 1449

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
1450
	switch (connection->write_ordering) {
P
Philipp Reisner 已提交
1451 1452
	case WO_none:
		if (rv == FE_RECYCLED)
1453
			return 0;
1454 1455 1456 1457 1458 1459 1460

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
1461
			drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
1462
			/* Fall through */
P
Philipp Reisner 已提交
1463 1464 1465

	case WO_bdev_flush:
	case WO_drain_io:
1466 1467
		conn_wait_active_ee_empty(connection);
		drbd_flush(connection);
1468

1469
		if (atomic_read(&connection->current_epoch->epoch_size)) {
1470 1471 1472
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
P
Philipp Reisner 已提交
1473 1474
		}

1475
		return 0;
1476
	default:
1477
		drbd_err(connection, "Strangeness in connection->write_ordering %d\n", connection->write_ordering);
1478
		return -EIO;
P
Philipp Reisner 已提交
1479 1480 1481 1482 1483 1484
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

1485 1486 1487 1488 1489
	spin_lock(&connection->epoch_lock);
	if (atomic_read(&connection->current_epoch->epoch_size)) {
		list_add(&epoch->list, &connection->current_epoch->list);
		connection->current_epoch = epoch;
		connection->epochs++;
P
Philipp Reisner 已提交
1490 1491 1492 1493
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
1494
	spin_unlock(&connection->epoch_lock);
P
Philipp Reisner 已提交
1495

1496
	return 0;
P
Philipp Reisner 已提交
1497 1498 1499 1500
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data
 *
 * Receive @data_size bytes of payload (preceded by an integrity digest,
 * if one was negotiated) into a freshly allocated peer request.
 * Returns the peer request, or NULL on receive/allocation/validation
 * failure. */
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	const sector_t capacity = drbd_get_capacity(device->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int dgs, ds, err;
	void *dig_in = peer_device->connection->int_dig_in;
	void *dig_vv = peer_device->connection->int_dig_vv;
	unsigned long *data;

	dgs = 0;
	if (peer_device->connection->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 *	  here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
		if (err)
			return NULL;
		/* the digest is transmitted in front of the payload */
		data_size -= dgs;
	}

	if (!expect(IS_ALIGNED(data_size, 512)))
		return NULL;
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		drbd_err(device, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	/* e.g. a "trim"/zero-out style request carries no payload */
	if (!data_size)
		return peer_req;

	/* receive the payload page by page into the peer request's pages */
	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		err = drbd_recv_all_warn(peer_device->connection, data, len);
		if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
			drbd_err(device, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (err) {
			drbd_free_peer_req(device, peer_req);
			return NULL;
		}
		ds -= len;
	}

	if (dgs) {
		/* recompute the digest over the received payload and compare
		 * it with what the peer sent */
		drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(device, peer_req);
			return NULL;
		}
	}
	device->recv_cnt += data_size>>9;
	return peer_req;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
1586
static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
P
Philipp Reisner 已提交
1587 1588
{
	struct page *page;
1589
	int err = 0;
P
Philipp Reisner 已提交
1590 1591
	void *data;

1592
	if (!data_size)
1593
		return 0;
1594

1595
	page = drbd_alloc_pages(peer_device, 1, 1);
P
Philipp Reisner 已提交
1596 1597 1598

	data = kmap(page);
	while (data_size) {
1599 1600
		unsigned int len = min_t(int, data_size, PAGE_SIZE);

1601
		err = drbd_recv_all_warn(peer_device->connection, data, len);
1602
		if (err)
P
Philipp Reisner 已提交
1603
			break;
1604
		data_size -= len;
P
Philipp Reisner 已提交
1605 1606
	}
	kunmap(page);
1607
	drbd_free_pages(peer_device->device, page, 0);
1608
	return err;
P
Philipp Reisner 已提交
1609 1610
}

1611
/* Receive the payload of a P_DATA_REPLY directly into the pages of the
 * original read request's master bio ("diskless read").  Verifies the
 * integrity digest if one was negotiated.
 * Returns 0 on success, a negative error code otherwise. */
static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct bio *bio;
	int dgs, err, expect;
	void *dig_in = peer_device->connection->int_dig_in;
	void *dig_vv = peer_device->connection->int_dig_vv;

	dgs = 0;
	if (peer_device->connection->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
		/* the digest precedes the payload on the wire */
		err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
		if (err)
			return err;
		data_size -= dgs;
	}

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	peer_device->device->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);

	/* fill each bio segment straight from the socket */
	bio_for_each_segment(bvec, bio, iter) {
		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
		expect = min_t(int, data_size, bvec.bv_len);
		err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
		kunmap(bvec.bv_page);
		if (err)
			return err;
		data_size -= expect;
	}

	if (dgs) {
		/* recompute over the received bio and compare with the
		 * digest the peer sent */
		drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
			return -EINVAL;
		}
	}

	D_ASSERT(peer_device->device, data_size == 0);
	return 0;
}

1659 1660 1661 1662
/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
1663
static int e_end_resync_block(struct drbd_work *w, int unused)
P
Philipp Reisner 已提交
1664
{
1665
	struct drbd_peer_request *peer_req =
1666 1667 1668
		container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
1669
	sector_t sector = peer_req->i.sector;
1670
	int err;
P
Philipp Reisner 已提交
1671

1672
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
P
Philipp Reisner 已提交
1673

1674
	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1675
		drbd_set_in_sync(device, sector, peer_req->i.size);
1676
		err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
P
Philipp Reisner 已提交
1677 1678
	} else {
		/* Record failure to sync */
1679
		drbd_rs_failed_io(device, sector, peer_req->i.size);
P
Philipp Reisner 已提交
1680

1681
		err  = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
P
Philipp Reisner 已提交
1682
	}
1683
	dec_unacked(device);
P
Philipp Reisner 已提交
1684

1685
	return err;
P
Philipp Reisner 已提交
1686 1687
}

1688 1689
/* Receive one resync data block into a peer request, queue it on the
 * sync_ee list and submit it as a local WRITE.  Annotated
 * __releases(local): the caller's ldev reference is either handed off to
 * the in-flight request or dropped here on the failure path.
 * Returns 0 when the write was submitted, -EIO otherwise. */
static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
			    int data_size) __releases(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(peer_device, ID_SYNCER, sector, data_size);
	if (!peer_req)
		goto fail;

	dec_rs_pending(device);

	inc_unacked(device);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	peer_req->w.cb = e_end_resync_block;

	/* make the request visible on sync_ee before it can complete */
	spin_lock_irq(&device->resource->req_lock);
	list_add(&peer_req->w.list, &device->sync_ee);
	spin_unlock_irq(&device->resource->req_lock);

	atomic_add(data_size >> 9, &device->rs_sect_ev);
	if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
		return 0;

	/* don't care for the reason here */
	drbd_err(device, "submit failed, triggering re-connect\n");
	/* undo the list_add above before freeing the request */
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&device->resource->req_lock);

	drbd_free_peer_req(device, peer_req);
fail:
	put_ldev(device);
	return -EIO;
}

1726
/* Resolve a block_id sent back by the peer into our request object.
 * The id is the request pointer value itself; it is validated by looking
 * it up in the interval tree @root at @sector before being trusted.
 * Returns NULL when validation fails (logged unless @missing_ok). */
static struct drbd_request *
find_request(struct drbd_device *device, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok) {
		drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}

1743
/*
 * receive_DataReply() - handle P_DATA_REPLY: data for one of our pending reads
 * @connection: connection the packet arrived on
 * @pi:         packet info (payload, size, volume number)
 *
 * Looks up the read request this reply answers (block_id encodes the
 * request pointer), copies the payload into it via recv_dless_read() and
 * advances the request state machine.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct drbd_request *req;
	sector_t sector;
	int err;
	struct p_data *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&device->resource->req_lock);
	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&device->resource->req_lock);
	if (unlikely(!req))
		return -EIO;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(peer_device, req, sector, pi->size);
	if (!err)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return err;
}

1778
/*
 * receive_RSDataReply() - handle P_RS_DATA_REPLY: resync data from the peer
 * @connection: connection the packet arrived on
 * @pi:         packet info (payload, size, volume number)
 *
 * If the local disk is attached, hand the payload to recv_resync_read()
 * which submits it to disk.  Otherwise drain the payload from the socket
 * and negatively acknowledge it.  Either way, account the received sectors
 * for the resync rate controller.
 */
static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	int err;
	struct p_data *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);
	/* resync replies never refer to a specific request */
	D_ASSERT(device, p->block_id == ID_SYNCER);

	if (get_ldev(device)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(peer_device, sector, pi->size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "Can not write resync data to local disk.\n");

		/* remove the payload from the socket even though we drop it */
		err = drbd_drain_block(peer_device, pi->size);

		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
	}

	/* account incoming resync traffic (in sectors) */
	atomic_add(pi->size >> 9, &device->rs_sect_in);

	return err;
}

1813
/*
 * restart_conflicting_writes() - requeue postponed local writes in a range
 * @device: device to scan
 * @sector: start of the conflicting range
 * @size:   length of the range in bytes
 *
 * Walks all local requests overlapping [sector, sector+size) and resolves
 * those that were postponed due to a write conflict and have finished
 * their local I/O.  Called under req_lock (see e_end_block()).
 */
static void restart_conflicting_writes(struct drbd_device *device,
				       sector_t sector, int size)
{
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		/* skip requests still busy locally or not postponed at all */
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
			continue;
		/* as it is RQ_POSTPONED, this will cause it to
		 * be queued on the retry workqueue. */
		__req_mod(req, CONFLICT_RESOLVED, NULL);
	}
}
P
Philipp Reisner 已提交
1831

1832 1833
/*
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
P
Philipp Reisner 已提交
1834
 */
1835
static int e_end_block(struct drbd_work *w, int cancel)
P
Philipp Reisner 已提交
1836
{
1837
	struct drbd_peer_request *peer_req =
1838 1839 1840
		container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
1841
	sector_t sector = peer_req->i.sector;
1842
	int err = 0, pcmd;
P
Philipp Reisner 已提交
1843

1844
	if (peer_req->flags & EE_SEND_WRITE_ACK) {
1845
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1846 1847
			pcmd = (device->state.conn >= C_SYNC_SOURCE &&
				device->state.conn <= C_PAUSED_SYNC_T &&
1848
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P
Philipp Reisner 已提交
1849
				P_RS_WRITE_ACK : P_WRITE_ACK;
1850
			err = drbd_send_ack(peer_device, pcmd, peer_req);
P
Philipp Reisner 已提交
1851
			if (pcmd == P_RS_WRITE_ACK)
1852
				drbd_set_in_sync(device, sector, peer_req->i.size);
P
Philipp Reisner 已提交
1853
		} else {
1854
			err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
P
Philipp Reisner 已提交
1855 1856 1857
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this?  */
		}
1858
		dec_unacked(device);
P
Philipp Reisner 已提交
1859 1860 1861
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1862
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1863
		spin_lock_irq(&device->resource->req_lock);
1864
		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
1865
		drbd_remove_epoch_entry_interval(device, peer_req);
1866
		if (peer_req->flags & EE_RESTART_REQUESTS)
1867
			restart_conflicting_writes(device, sector, peer_req->i.size);
1868
		spin_unlock_irq(&device->resource->req_lock);
1869
	} else
1870
		D_ASSERT(device, drbd_interval_empty(&peer_req->i));
P
Philipp Reisner 已提交
1871

1872
	drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
P
Philipp Reisner 已提交
1873

1874
	return err;
P
Philipp Reisner 已提交
1875 1876
}

1877
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
P
Philipp Reisner 已提交
1878
{
1879
	struct drbd_peer_request *peer_req =
1880 1881
		container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
1882
	int err;
P
Philipp Reisner 已提交
1883

1884 1885
	err = drbd_send_ack(peer_device, ack, peer_req);
	dec_unacked(peer_device->device);
P
Philipp Reisner 已提交
1886

1887
	return err;
P
Philipp Reisner 已提交
1888 1889
}

1890
/* Work callback: tell the peer its conflicting write was superseded
 * (queued from handle_write_conflicts() when our write wins). */
static int e_send_superseded(struct drbd_work *w, int unused)
{
	return e_send_ack(w, P_SUPERSEDED);
}

1895
static int e_send_retry_write(struct drbd_work *w, int unused)
1896
{
1897 1898 1899
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_connection *connection = peer_req->peer_device->connection;
1900

1901
	return e_send_ack(w, connection->agreed_pro_version >= 100 ?
1902
			     P_RETRY_WRITE : P_SUPERSEDED);
1903
}
P
Philipp Reisner 已提交
1904

1905 1906 1907 1908 1909 1910 1911 1912 1913
/*
 * seq_greater() - is sequence number @a newer than @b?
 *
 * We assume 32-bit wrap-around here.
 * For 24-bit wrap-around, we would have to shift:
 *  a <<= 8; b <<= 8;
 *
 * Compute the difference in unsigned arithmetic (wrap-around is well
 * defined there) and only then reinterpret it as signed — the same idiom
 * as the kernel's time_after().  The previous (s32)a - (s32)b form could
 * overflow signed int, which is undefined behavior in standard C.
 */
static bool seq_greater(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}
P
Philipp Reisner 已提交
1914

1915 1916 1917
/* Return the newer of two wrap-around sequence numbers. */
static u32 seq_max(u32 a, u32 b)
{
	if (seq_greater(a, b))
		return a;
	return b;
}

1920
/*
 * update_peer_seq() - record the newest sequence number seen from the peer
 * @peer_device: peer the sequence number came from
 * @peer_seq:    sequence number from the packet
 *
 * Only relevant when RESOLVE_CONFLICTS is set (dual-primary conflict
 * resolution); advances device->peer_seq under peer_seq_lock and wakes
 * waiters in wait_for_and_update_peer_seq() when this packet actually
 * moved the sequence forward.
 */
static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
{
	struct drbd_device *device = peer_device->device;
	unsigned int newest_peer_seq;

	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
		spin_lock(&device->peer_seq_lock);
		newest_peer_seq = seq_max(device->peer_seq, peer_seq);
		device->peer_seq = newest_peer_seq;
		spin_unlock(&device->peer_seq_lock);
		/* wake up only if we actually changed device->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&device->seq_wait);
	}
}

1936
/* Do the sector ranges [s1, s1 + (l1>>9)) and [s2, s2 + (l2>>9))
 * intersect?  Lengths are given in bytes.  Two ranges intersect iff
 * each one starts before the other one ends. */
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return s1 + (l1>>9) > s2 && s2 + (l2>>9) > s1;
}
1940

1941
/* maybe change sync_ee into interval trees as well? */
1942
static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
1943 1944
{
	struct drbd_peer_request *rs_req;
1945 1946
	bool rv = 0;

1947
	spin_lock_irq(&device->resource->req_lock);
1948
	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
1949 1950
		if (overlaps(peer_req->i.sector, peer_req->i.size,
			     rs_req->i.sector, rs_req->i.size)) {
1951 1952 1953 1954
			rv = 1;
			break;
		}
	}
1955
	spin_unlock_irq(&device->resource->req_lock);
1956 1957 1958 1959

	return rv;
}

P
Philipp Reisner 已提交
1960 1961 1962 1963 1964 1965 1966 1967 1968
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
1969
 * In case packet_seq is larger than device->peer_seq number, there are
P
Philipp Reisner 已提交
1970
 * outstanding packets on the msock. We wait for them to arrive.
1971
 * In case we are the logically next packet, we update device->peer_seq
P
Philipp Reisner 已提交
1972 1973 1974 1975 1976 1977 1978 1979 1980
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1981
static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
P
Philipp Reisner 已提交
1982
{
1983
	struct drbd_device *device = peer_device->device;
P
Philipp Reisner 已提交
1984 1985
	DEFINE_WAIT(wait);
	long timeout;
1986
	int ret = 0, tp;
1987

1988
	if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
1989 1990
		return 0;

1991
	spin_lock(&device->peer_seq_lock);
P
Philipp Reisner 已提交
1992
	for (;;) {
1993 1994
		if (!seq_greater(peer_seq - 1, device->peer_seq)) {
			device->peer_seq = seq_max(device->peer_seq, peer_seq);
P
Philipp Reisner 已提交
1995
			break;
1996
		}
1997

P
Philipp Reisner 已提交
1998 1999 2000 2001
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
2002 2003

		rcu_read_lock();
2004
		tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
2005 2006 2007 2008 2009 2010
		rcu_read_unlock();

		if (!tp)
			break;

		/* Only need to wait if two_primaries is enabled */
2011 2012
		prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&device->peer_seq_lock);
2013
		rcu_read_lock();
2014
		timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
2015
		rcu_read_unlock();
2016
		timeout = schedule_timeout(timeout);
2017
		spin_lock(&device->peer_seq_lock);
2018
		if (!timeout) {
P
Philipp Reisner 已提交
2019
			ret = -ETIMEDOUT;
2020
			drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
P
Philipp Reisner 已提交
2021 2022 2023
			break;
		}
	}
2024 2025
	spin_unlock(&device->peer_seq_lock);
	finish_wait(&device->seq_wait, &wait);
P
Philipp Reisner 已提交
2026 2027 2028
	return ret;
}

2029 2030 2031
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
2032
static unsigned long wire_flags_to_bio(struct drbd_device *device, u32 dpf)
2033
{
2034 2035 2036 2037
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
2038 2039
}

2040
/*
 * fail_postponed_requests() - negatively complete postponed writes in a range
 * @device: device to scan
 * @sector: start of the range
 * @size:   length of the range in bytes
 *
 * Called with req_lock held (see handle_write_conflicts()).  For each
 * postponed local write overlapping the range, clear RQ_POSTPONED and
 * fail it with NEG_ACKED.  The lock is dropped around
 * complete_master_bio(); since that invalidates the tree walk, the scan
 * is restarted from the top after every completion.
 */
static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
				    unsigned int size)
{
	struct drbd_interval *i;

    repeat:
	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
			continue;
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&device->resource->req_lock);
		if (m.bio)
			complete_master_bio(device, &m);
		spin_lock_irq(&device->resource->req_lock);
		goto repeat;
	}
}

2065
static int handle_write_conflicts(struct drbd_device *device,
2066 2067
				  struct drbd_peer_request *peer_req)
{
2068
	struct drbd_connection *connection = first_peer_device(device)->connection;
2069
	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2070 2071 2072 2073 2074 2075 2076 2077 2078 2079
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;
	bool equal;
	int err;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
2080
	drbd_insert_interval(&device->write_requests, &peer_req->i);
2081 2082

    repeat:
2083
	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2084 2085 2086 2087 2088 2089 2090 2091 2092
		if (i == &peer_req->i)
			continue;

		if (!i->local) {
			/*
			 * Our peer has sent a conflicting remote request; this
			 * should not happen in a two-node setup.  Wait for the
			 * earlier peer request to complete.
			 */
2093
			err = drbd_wait_misc(device, i);
2094 2095 2096 2097 2098 2099 2100 2101 2102
			if (err)
				goto out;
			goto repeat;
		}

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
2103 2104 2105
			 * overlapping request, it can be considered overwritten
			 * and thus superseded; otherwise, it will be retried
			 * once all overlapping requests have completed.
2106
			 */
2107
			bool superseded = i->sector <= sector && i->sector +
2108 2109 2110
				       (i->size >> 9) >= sector + (size >> 9);

			if (!equal)
2111
				drbd_alert(device, "Concurrent writes detected: "
2112 2113 2114 2115
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
2116
					  superseded ? "local" : "remote");
2117

2118
			inc_unacked(device);
2119
			peer_req->w.cb = superseded ? e_send_superseded :
2120
						   e_send_retry_write;
2121
			list_add_tail(&peer_req->w.list, &device->done_ee);
2122
			wake_asender(first_peer_device(device)->connection);
2123 2124 2125 2126 2127 2128 2129 2130

			err = -ENOENT;
			goto out;
		} else {
			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

			if (!equal)
2131
				drbd_alert(device, "Concurrent writes detected: "
2132 2133 2134 2135 2136 2137 2138 2139
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
2140 2141 2142
				 * decide if this request has been superseded
				 * or needs to be retried.
				 * Requests that have been superseded will
2143 2144 2145 2146 2147 2148
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
2149
				err = drbd_wait_misc(device, &req->i);
2150
				if (err) {
2151
					_conn_request_state(first_peer_device(device)->connection,
2152 2153
							    NS(conn, C_TIMEOUT),
							    CS_HARD);
2154
					fail_postponed_requests(device, sector, size);
2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169
					goto out;
				}
				goto repeat;
			}
			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;
		}
	}
	err = 0;

    out:
	if (err)
2170
		drbd_remove_epoch_entry_interval(device, peer_req);
2171 2172 2173
	return err;
}

P
Philipp Reisner 已提交
2174
/* mirrored write */
/*
 * receive_Data() - handle P_DATA: a mirrored write from the peer
 * @connection: connection the packet arrived on
 * @pi:         packet info (payload, size, volume number)
 *
 * Reads the payload into a peer request, assigns it to the current write
 * epoch, resolves write conflicts in dual-primary mode, sends the ack the
 * wire protocol requires, and submits the write to the local disk.
 *
 * Returns 0 on success, negative error code on failure (which triggers a
 * reconnect in the caller).
 */
static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);
	int rw = WRITE;
	u32 dp_flags;
	int err, tp;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	if (!get_ldev(device)) {
		int err2;

		/* no local disk: keep sequence numbers in step, drain the
		 * payload from the socket, and negatively ack the write */
		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
		atomic_inc(&connection->current_epoch->epoch_size);
		err2 = drbd_drain_block(peer_device, pi->size);
		if (!err)
			err = err2;
		return err;
	}

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */

	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(peer_device, p->block_id, sector, pi->size);
	if (!peer_req) {
		put_ldev(device);
		return -EIO;
	}

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(device, dp_flags);
	/* a zero-sized request is only valid as an explicit flush */
	if (peer_req->pages == NULL) {
		D_ASSERT(device, peer_req->i.size == 0);
		D_ASSERT(device, dp_flags & DP_FLUSH);
	}

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	/* attach this write to the currently open epoch */
	spin_lock(&connection->epoch_lock);
	peer_req->epoch = connection->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&connection->epoch_lock);

	rcu_read_lock();
	tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
	rcu_read_unlock();
	if (tp) {
		/* dual-primary: order against the peer's acks and resolve
		 * overlaps with concurrent local writes */
		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
		if (err)
			goto out_interrupted;
		spin_lock_irq(&device->resource->req_lock);
		err = handle_write_conflicts(device, peer_req);
		if (err) {
			spin_unlock_irq(&device->resource->req_lock);
			if (err == -ENOENT) {
				/* request queued as superseded/retry instead */
				put_ldev(device);
				return 0;
			}
			goto out_interrupted;
		}
	} else {
		update_peer_seq(peer_device, peer_seq);
		spin_lock_irq(&device->resource->req_lock);
	}
	list_add(&peer_req->w.list, &device->active_ee);
	spin_unlock_irq(&device->resource->req_lock);

	/* while resyncing, do not write over in-flight resync data */
	if (device->state.conn == C_SYNC_TARGET)
		wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));

	/* pre-100 peers do not send ack flags; derive them from protocol */
	if (peer_device->connection->agreed_pro_version < 100) {
		rcu_read_lock();
		switch (rcu_dereference(peer_device->connection->net_conf)->wire_protocol) {
		case DRBD_PROT_C:
			dp_flags |= DP_SEND_WRITE_ACK;
			break;
		case DRBD_PROT_B:
			dp_flags |= DP_SEND_RECEIVE_ACK;
			break;
		}
		rcu_read_unlock();
	}

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		inc_unacked(device);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
	}

	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(first_peer_device(device), P_RECV_ACK, peer_req);
	}

	if (device->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(device, &peer_req->i, true);
	}

	err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
	if (!err)
		return 0;

	/* don't care for the reason here */
	drbd_err(device, "submit failed, triggering re-connect\n");
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(device, peer_req);
	spin_unlock_irq(&device->resource->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(device, &peer_req->i);

out_interrupted:
	drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
	put_ldev(device);
	drbd_free_peer_req(device, peer_req);
	return err;
}

2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 *
 * Returns non-zero when the caller should throttle resync I/O.
 */
int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
{
	struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;
	unsigned int c_min_rate;

	rcu_read_lock();
	c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
	rcu_read_unlock();

	/* feature disabled? */
	if (c_min_rate == 0)
		return 0;

	spin_lock_irq(&device->al_lock);
	tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&device->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&device->al_lock);

	/* total sectors moved by the backing disk minus the sectors we
	 * submitted ourselves for resync = foreign (application) activity */
	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&device->rs_sect_ev);

	if (!device->rs_last_events || curr_events - device->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		device->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
			rs_left = device->ov_left;
		else
			rs_left = drbd_bm_total_weight(device) - device->rs_failed;

		dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = device->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		/* current short-term sync rate exceeds the configured floor */
		if (dbdt > c_min_rate)
			throttle = 1;
	}
	return throttle;
}


2389
/*
 * receive_DataRequest() - handle the peer's read-type requests
 * @connection: connection the packet arrived on
 * @pi:         packet info; pi->cmd selects the request flavor
 *
 * Serves P_DATA_REQUEST (application read for the peer), P_RS_DATA_REQUEST
 * (resync read), P_CSUM_RS_REQUEST / P_OV_REPLY (checksum-based resync /
 * online-verify, with a digest payload), and P_OV_REQUEST (online verify).
 * Validates the requested range, allocates a peer request with the proper
 * completion callback, optionally throttles resync, and submits the READ.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	sector_t capacity;
	struct drbd_peer_request *peer_req;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p =	pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;
	capacity = drbd_get_capacity(device->this_bdev);

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	/* reject malformed sizes: non-positive, unaligned, or too large */
	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}
	/* reject ranges beyond the end of the device */
	if (sector + (size>>9) > capacity) {
		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}

	if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
		/* no usable local data: send the matching negative reply
		 * for each request flavor, then drain the payload */
		verb = 1;
		switch (pi->cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(device);
			drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			BUG();
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possibly payload */
		return drbd_drain_block(peer_device, pi->size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, GFP_NOIO);
	if (!peer_req) {
		put_ldev(device);
		return -ENOMEM;
	}

	switch (pi->cmd) {
	case P_DATA_REQUEST:
		peer_req->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		peer_req->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		/* these carry a digest payload; receive it into di */
		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = pi->size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		peer_req->digest = di;
		peer_req->flags |= EE_HAS_DIGEST;

		if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
			goto out_free_e;

		if (pi->cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
			peer_req->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (pi->cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &device->rs_sect_in);
			peer_req->w.cb = w_e_end_ov_reply;
			dec_rs_pending(device);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		/* first verify request of a run: initialize progress marks */
		if (device->ov_start_sector == ~(sector_t)0 &&
		    peer_device->connection->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			device->ov_start_sector = sector;
			device->ov_position = sector;
			device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
			device->rs_total = device->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				device->rs_mark_left[i] = device->ov_left;
				device->rs_mark_time[i] = now;
			}
			drbd_info(device, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		peer_req->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		BUG();
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time.  For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * on request through.  The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (device->state.peer != R_PRIMARY && drbd_rs_should_slow_down(device, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(device, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &device->rs_sect_ev);

submit:
	inc_unacked(device);
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&peer_req->w.list, &device->read_ee);
	spin_unlock_irq(&device->resource->req_lock);

	if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
		return 0;

	/* don't care for the reason here */
	drbd_err(device, "submit failed, triggering re-connect\n");
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&device->resource->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(device);
	drbd_free_peer_req(device, peer_req);
	return -EIO;
}

2579 2580 2581 2582
/**
 * drbd_asb_recover_0p  -  Recover after split-brain with no remaining primaries
 *
 * Applies the configured after-sb-0pri policy.  Return value feeds into the
 * sync handshake: -1 means discard the local data (become sync target),
 * 1 means discard the peer's data, and -100 means no automatic decision
 * could be made (caller will drop the connection).
 */
static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;
	enum drbd_after_sb_p after_sb_0p;

	/* Lowest UUID bit records which side was primary most recently. */
	self = device->ldev->md.uuid[UI_BITMAP] & 1;
	peer = device->p_uuid[UI_BITMAP] & 1;

	/* Number of changed bits ("changes") on each side, for the
	 * least-changes / zero-changes strategies below. */
	ch_peer = device->p_uuid[UI_SIZE];
	ch_self = device->comm_bm_set;

	rcu_read_lock();
	after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
	rcu_read_unlock();
	switch (after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
	case ASB_VIOLENTLY:
		/* These policies only make sense with at least one primary;
		 * they are invalid as an after-sb-0pri setting. */
		drbd_err(device, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		drbd_warn(device, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			/* Both unchanged: break the tie deterministically via
			 * the RESOLVE_CONFLICTS flag (same on both nodes). */
			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
		/* intentional fall through to discard-least-changes */
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}

2660 2661 2662 2663
/**
 * drbd_asb_recover_1p  -  Recover after split-brain with one remaining primary
 *
 * Applies the configured after-sb-1pri policy.  Most strategies delegate to
 * drbd_asb_recover_0p() and then check whether the verdict is compatible
 * with the local role.  Returns -1 (discard local), 1 (discard peer) or
 * -100 (unresolved; caller drops the connection).
 */
static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_1p;

	rcu_read_lock();
	after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
	rcu_read_unlock();
	switch (after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_DISCARD_ZERO_CHG:
		/* 0-primary strategies are invalid as an after-sb-1pri setting. */
		drbd_err(device, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		/* Accept the 0p verdict only if it victimizes the secondary. */
		hg = drbd_asb_recover_0p(peer_device);
		if (hg == -1 && device->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && device->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(peer_device);
		break;
	case ASB_DISCARD_SECONDARY:
		return device->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(peer_device);
		if (hg == -1 && device->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			  * we might be here in C_WF_REPORT_PARAMS which is transient.
			  * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				/* Could not demote; let the helper script decide. */
				drbd_khelper(device, "pri-lost-after-sb");
			} else {
				drbd_warn(device, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

2717 2718 2719 2720
/**
 * drbd_asb_recover_2p  -  Recover after split-brain with two remaining primaries
 *
 * Applies the configured after-sb-2pri policy.  Returns -1 (discard local),
 * 1 (discard peer) or -100 (unresolved; caller drops the connection).
 */
static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_2p;

	rcu_read_lock();
	after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
	rcu_read_unlock();
	switch (after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_DISCARD_ZERO_CHG:
		/* Only disconnect/violently/call-helper are valid with two primaries. */
		drbd_err(device, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(peer_device);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(peer_device);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			  * we might be here in C_WF_REPORT_PARAMS which is transient.
			  * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				/* Could not demote; let the helper script decide. */
				drbd_khelper(device, "pri-lost-after-sb");
			} else {
				drbd_warn(device, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

2767
/* Log one UUID table (labelled by @text, e.g. "self" or "peer") together
 * with its dirty-bit count and flags.  @uuid may be NULL if the table
 * vanished concurrently; then only a notice is logged. */
static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (uuid == NULL) {
		drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
		return;
	}

	drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
		  text,
		  (unsigned long long)uuid[UI_CURRENT],
		  (unsigned long long)uuid[UI_BITMAP],
		  (unsigned long long)uuid[UI_HISTORY_START],
		  (unsigned long long)uuid[UI_HISTORY_END],
		  (unsigned long long)bits,
		  (unsigned long long)flags);
}

/*
  Return codes of drbd_uuid_compare():
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091   requires proto 91
-1096   requires proto 96
 */
2796
/* Compare our UUID set against the peer's and decide the resync direction.
 * See the return-code table above; *rule_nr records which rule decided,
 * for the "uuid_compare()=%d by rule %d" log line.
 * Caller holds the uuid_lock (see drbd_sync_handshake).  Some rules
 * correct our own or the peer's in-memory UUIDs as a side effect. */
static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	/* Bit 0 of a UUID is the "was primary" flag; mask it off everywhere. */
	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (first_peer_device(device)->connection->agreed_pro_version < 91)
				return -1091;

			if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
				/* Retire the stale bitmap UUID into history, as the
				 * finished resync would have done. */
				drbd_uuid_move_history(device);
				device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
				device->ldev->md.uuid[UI_BITMAP] = 0;

				drbd_uuid_dump(device, "self", device->ldev->md.uuid,
					       device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
				*rule_nr = 34;
			} else {
				drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {

			if (first_peer_device(device)->connection->agreed_pro_version < 91)
				return -1091;

			if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				/* Apply the same correction to our copy of the
				 * peer's UUIDs. */
				device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
				device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
				device->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
			(device->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			/* Both crashed as primary: break the tie with the
			 * RESOLVE_CONFLICTS flag (consistent on both nodes). */
			dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
		    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get though. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (first_peer_device(device)->connection->agreed_pro_version < 91)
				return -1091;

			device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
			device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];

			drbd_info(device, "Lost last syncUUID packet, corrected:\n");
			drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = device->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
		    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get though. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (first_peer_device(device)->connection->agreed_pro_version < 91)
				return -1091;

			__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
			__drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);

			drbd_info(device, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(device, "self", device->ldev->md.uuid,
				       device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);

			return 1;
		}
	}


	*rule_nr = 80;
	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = device->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = device->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = device->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
   Decides, from the UUID comparison and the configured after-split-brain
   policies, whether we become sync source (C_WF_BITMAP_S), sync target
   (C_WF_BITMAP_T), or just connected (C_CONNECTED).
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
					   enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;
	struct net_conf *nc;
	int hg, rule_nr, rr_conflict, tentative;

	mydisk = device->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = device->new_state_tmp.disk;

	drbd_info(device, "drbd_sync_handshake:\n");

	/* uuid_lock guards both UUID tables while we dump and compare them;
	 * drbd_uuid_compare() may also correct them under this lock. */
	spin_lock_irq(&device->ldev->md.uuid_lock);
	drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
	drbd_uuid_dump(device, "peer", device->p_uuid,
		       device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(device, &rule_nr);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		drbd_alert(device, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		/* -1091/-1096 encode "requires protocol 91/96", see table above. */
		drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
		/* One side is inconsistent: disk states override the UUID
		 * verdict; |hg| == 2 means "full sync". */
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		drbd_info(device, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(device, "initial-split-brain");

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
		/* Try the automatic after-split-brain policies, keyed by
		 * how many nodes are currently primary. */
		int pcount = (device->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(peer_device);
			break;
		case 1:
			hg = drbd_asb_recover_1p(peer_device);
			break;
		case 2:
			hg = drbd_asb_recover_2p(peer_device);
			break;
		}
		if (abs(hg) < 100) {
			drbd_warn(device, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				drbd_warn(device, "Doing a full sync, since"
				     " UUIDs where ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		/* Still unresolved: honour the discard-my-data setting,
		 * peer's setting arrives in bit 0 of UI_FLAGS. */
		if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			drbd_warn(device, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}
	rr_conflict = nc->rr_conflict;
	tentative = nc->tentative;
	rcu_read_unlock();

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(device, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
		switch (rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(device, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
			     "assumption\n");
		}
	}

	if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
		/* dry-run: report what would happen, then bail out. */
		if (hg == 0)
			drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(device)) {
			drbd_info(device, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(device));
		}
	}

	return rv;
}

3137
static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
P
Philipp Reisner 已提交
3138 3139
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3140 3141
	if (peer == ASB_DISCARD_REMOTE)
		return ASB_DISCARD_LOCAL;
P
Philipp Reisner 已提交
3142 3143

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3144 3145
	if (peer == ASB_DISCARD_LOCAL)
		return ASB_DISCARD_REMOTE;
P
Philipp Reisner 已提交
3146 3147

	/* everything else is valid if they are equal on both sides. */
3148
	return peer;
P
Philipp Reisner 已提交
3149 3150
}

3151
static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
3152
{
3153
	struct p_protocol *p = pi->data;
3154 3155 3156 3157
	enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_proto, p_discard_my_data, p_two_primaries, cf;
	struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
	char integrity_alg[SHARED_SECRET_MAX] = "";
3158
	struct crypto_hash *peer_integrity_tfm = NULL;
3159
	void *int_dig_in = NULL, *int_dig_vv = NULL;
P
Philipp Reisner 已提交
3160 3161 3162 3163 3164 3165

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
3166
	cf		= be32_to_cpu(p->conn_flags);
3167
	p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3168

3169
	if (connection->agreed_pro_version >= 87) {
3170
		int err;
3171

3172
		if (pi->size > sizeof(integrity_alg))
3173
			return -EIO;
3174
		err = drbd_recv_all(connection, integrity_alg, pi->size);
3175 3176
		if (err)
			return err;
3177
		integrity_alg[SHARED_SECRET_MAX - 1] = 0;
P
Philipp Reisner 已提交
3178 3179
	}

3180
	if (pi->cmd != P_PROTOCOL_UPDATE) {
3181
		clear_bit(CONN_DRY_RUN, &connection->flags);
P
Philipp Reisner 已提交
3182

3183
		if (cf & CF_DRY_RUN)
3184
			set_bit(CONN_DRY_RUN, &connection->flags);
P
Philipp Reisner 已提交
3185

3186
		rcu_read_lock();
3187
		nc = rcu_dereference(connection->net_conf);
P
Philipp Reisner 已提交
3188

3189
		if (p_proto != nc->wire_protocol) {
3190
			drbd_err(connection, "incompatible %s settings\n", "protocol");
3191 3192
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3193

3194
		if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3195
			drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
3196 3197
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3198

3199
		if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3200
			drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
3201 3202
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3203

3204
		if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3205
			drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
3206 3207
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3208

3209
		if (p_discard_my_data && nc->discard_my_data) {
3210
			drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
3211 3212
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3213

3214
		if (p_two_primaries != nc->two_primaries) {
3215
			drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
3216 3217
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3218

3219
		if (strcmp(integrity_alg, nc->integrity_alg)) {
3220
			drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
3221 3222
			goto disconnect_rcu_unlock;
		}
P
Philipp Reisner 已提交
3223

3224
		rcu_read_unlock();
P
Philipp Reisner 已提交
3225 3226
	}

3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237
	if (integrity_alg[0]) {
		int hash_size;

		/*
		 * We can only change the peer data integrity algorithm
		 * here.  Changing our own data integrity algorithm
		 * requires that we send a P_PROTOCOL_UPDATE packet at
		 * the same time; otherwise, the peer has no way to
		 * tell between which packets the algorithm should
		 * change.
		 */
P
Philipp Reisner 已提交
3238

3239 3240
		peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (!peer_integrity_tfm) {
3241
			drbd_err(connection, "peer data-integrity-alg %s not supported\n",
3242 3243 3244
				 integrity_alg);
			goto disconnect;
		}
P
Philipp Reisner 已提交
3245

3246 3247 3248 3249
		hash_size = crypto_hash_digestsize(peer_integrity_tfm);
		int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!(int_dig_in && int_dig_vv)) {
3250
			drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
P
Philipp Reisner 已提交
3251 3252 3253 3254
			goto disconnect;
		}
	}

3255 3256
	new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_net_conf) {
3257
		drbd_err(connection, "Allocation of new net_conf failed\n");
3258 3259 3260
		goto disconnect;
	}

3261
	mutex_lock(&connection->data.mutex);
3262
	mutex_lock(&connection->resource->conf_update);
3263
	old_net_conf = connection->net_conf;
3264 3265 3266 3267 3268 3269 3270 3271
	*new_net_conf = *old_net_conf;

	new_net_conf->wire_protocol = p_proto;
	new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
	new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
	new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
	new_net_conf->two_primaries = p_two_primaries;

3272
	rcu_assign_pointer(connection->net_conf, new_net_conf);
3273
	mutex_unlock(&connection->resource->conf_update);
3274
	mutex_unlock(&connection->data.mutex);
3275

3276 3277 3278 3279 3280 3281
	crypto_free_hash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	connection->peer_integrity_tfm = peer_integrity_tfm;
	connection->int_dig_in = int_dig_in;
	connection->int_dig_vv = int_dig_vv;
3282 3283

	if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3284
		drbd_info(connection, "peer data-integrity-alg: %s\n",
3285 3286 3287 3288
			  integrity_alg[0] ? integrity_alg : "(none)");

	synchronize_rcu();
	kfree(old_net_conf);
3289
	return 0;
P
Philipp Reisner 已提交
3290

3291 3292
disconnect_rcu_unlock:
	rcu_read_unlock();
P
Philipp Reisner 已提交
3293
disconnect:
3294
	crypto_free_hash(peer_integrity_tfm);
3295 3296
	kfree(int_dig_in);
	kfree(int_dig_vv);
3297
	conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
3298
	return -EIO;
P
Philipp Reisner 已提交
3299 3300 3301 3302 3303 3304 3305
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
3306
static
3307
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
P
Philipp Reisner 已提交
3308 3309 3310 3311 3312 3313 3314 3315 3316
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
3317
		drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
P
Philipp Reisner 已提交
3318 3319 3320 3321 3322 3323
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	return tfm;
}

3324
static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
3325
{
3326
	void *buffer = connection->data.rbuf;
3327 3328 3329 3330
	int size = pi->size;

	while (size) {
		int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3331
		s = drbd_recv(connection, buffer, s);
3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354
		if (s <= 0) {
			if (s < 0)
				return s;
			break;
		}
		size -= s;
	}
	if (size)
		return -EIO;
	return 0;
}

/*
 * config_unknown_volume  -  device configuration command for unknown volume
 *
 * When a device is added to an existing connection, the node on which the
 * device is added first will send configuration commands to its peer but the
 * peer will not know about the device yet.  It will warn and ignore these
 * commands.  Once the device is added on the second node, the second node will
 * send the same device configuration commands, but in the other direction.
 *
 * (We can also end up here if drbd is misconfigured.)
 */
static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
{
	drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
		  cmdname(pi->cmd), pi->vnr);
	/* Consume the payload so the receive stream stays in sync; returns
	 * 0 or a negative socket error. */
	return ignore_remaining_packet(connection, pi);
}

/*
 * receive_SyncParam() - process the peer's sync parameter packet.
 *
 * Depending on the agreed protocol version (apv) the packet carries the
 * resync rate only (apv <= 87), additionally a variable-length verify-alg
 * string (apv == 88), fixed-size verify-/csums-alg fields (apv 89..94),
 * or further resync controller settings on top of that (apv >= 95).
 *
 * New disk_conf / net_conf / fifo plan objects are published via RCU and
 * the old ones freed after a grace period.
 *
 * Returns 0 on success, a negative error code otherwise; on the
 * "disconnect" path the connection is forced to C_DISCONNECTING.
 */
static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_rs_param_95 *p;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
	const int apv = connection->agreed_pro_version;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int fifo_size = 0;
	int err;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return config_unknown_volume(connection, pi);
	device = peer_device->device;

	/* Maximum acceptable packet size for the negotiated version. */
	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (pi->size > exp_max_sz) {
		drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    pi->size, exp_max_sz);
		return -EIO;
	}

	/* Only apv == 88 has a variable-length trailer (the verify-alg
	 * string); the other versions expect data_size == 0. */
	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param);
		data_size = pi->size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89);
		data_size = pi->size - header_size;
		D_ASSERT(device, data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95);
		data_size = pi->size - header_size;
		D_ASSERT(device, data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	p = pi->data;
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	err = drbd_recv_all(peer_device->connection, p, header_size);
	if (err)
		return err;

	/* Serialize with other configuration updates. */
	mutex_lock(&connection->resource->conf_update);
	old_net_conf = peer_device->connection->net_conf;
	if (get_ldev(device)) {
		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			put_ldev(device);
			mutex_unlock(&connection->resource->conf_update);
			drbd_err(device, "Allocation of new disk_conf failed\n");
			return -ENOMEM;
		}

		old_disk_conf = device->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;

		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
	}

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
				drbd_err(device, "verify-alg of wrong size, "
					"peer wants %u, accepting only up to %u byte\n",
					data_size, SHARED_SECRET_MAX);
				err = -EIO;
				goto reconnect;
			}

			err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
			if (err)
				goto reconnect;
			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(device, p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		/* Changing the verify algorithm is only allowed during the
		 * initial parameter exchange, not on a live connection. */
		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
			if (device->state.conn == C_WF_REPORT_PARAMS) {
				drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    old_net_conf->verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(device,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
			if (device->state.conn == C_WF_REPORT_PARAMS) {
				drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    old_net_conf->csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(device,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		/* apv >= 95: dynamic resync controller settings; may need a
		 * differently sized fifo plan. */
		if (apv > 94 && new_disk_conf) {
			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != device->rs_plan_s->size) {
				new_plan = fifo_alloc(fifo_size);
				if (!new_plan) {
					drbd_err(device, "kmalloc of fifo_buffer failed");
					put_ldev(device);
					goto disconnect;
				}
			}
		}

		if (verify_tfm || csums_tfm) {
			new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
			if (!new_net_conf) {
				drbd_err(device, "Allocation of new net_conf failed\n");
				goto disconnect;
			}

			*new_net_conf = *old_net_conf;

			if (verify_tfm) {
				strcpy(new_net_conf->verify_alg, p->verify_alg);
				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
				crypto_free_hash(peer_device->connection->verify_tfm);
				peer_device->connection->verify_tfm = verify_tfm;
				drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
			}
			if (csums_tfm) {
				strcpy(new_net_conf->csums_alg, p->csums_alg);
				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
				crypto_free_hash(peer_device->connection->csums_tfm);
				peer_device->connection->csums_tfm = csums_tfm;
				drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
			}
			rcu_assign_pointer(connection->net_conf, new_net_conf);
		}
	}

	/* Publish the remaining new objects via RCU. */
	if (new_disk_conf) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		put_ldev(device);
	}

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&connection->resource->conf_update);
	/* Wait for all RCU readers of the old objects before freeing them. */
	synchronize_rcu();
	if (new_net_conf)
		kfree(old_net_conf);
	kfree(old_disk_conf);
	kfree(old_plan);

	return 0;

reconnect:
	/* Malformed packet: undo the allocation and ldev reference. */
	if (new_disk_conf) {
		put_ldev(device);
		kfree(new_disk_conf);
	}
	mutex_unlock(&connection->resource->conf_update);
	return -EIO;

disconnect:
	/* Configuration conflict or allocation failure: undo everything
	 * and force the connection down. */
	kfree(new_plan);
	if (new_disk_conf) {
		put_ldev(device);
		kfree(new_disk_conf);
	}
	mutex_unlock(&connection->resource->conf_update);
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}

/* warn if the arguments differ by more than 12.5% */
3575
static void warn_if_differ_considerably(struct drbd_device *device,
P
Philipp Reisner 已提交
3576 3577 3578 3579 3580 3581 3582
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
3583
		drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
P
Philipp Reisner 已提交
3584 3585 3586
		     (unsigned long long)a, (unsigned long long)b);
}

3587
static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
3588
{
3589
	struct drbd_peer_device *peer_device;
3590
	struct drbd_device *device;
3591
	struct p_sizes *p = pi->data;
3592
	enum determine_dev_size dd = DS_UNCHANGED;
P
Philipp Reisner 已提交
3593 3594
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
3595
	enum dds_flags ddsf;
P
Philipp Reisner 已提交
3596

3597 3598
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
3599
		return config_unknown_volume(connection, pi);
3600
	device = peer_device->device;
3601

P
Philipp Reisner 已提交
3602 3603 3604 3605 3606
	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
3607
	device->p_size = p_size;
P
Philipp Reisner 已提交
3608

3609
	if (get_ldev(device)) {
P
Philipp Reisner 已提交
3610
		rcu_read_lock();
3611
		my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
P
Philipp Reisner 已提交
3612 3613
		rcu_read_unlock();

3614 3615 3616
		warn_if_differ_considerably(device, "lower level device sizes",
			   p_size, drbd_get_max_capacity(device->ldev));
		warn_if_differ_considerably(device, "user requested size",
P
Philipp Reisner 已提交
3617
					    p_usize, my_usize);
P
Philipp Reisner 已提交
3618 3619 3620

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
3621
		if (device->state.conn == C_WF_REPORT_PARAMS)
P
Philipp Reisner 已提交
3622
			p_usize = min_not_zero(my_usize, p_usize);
P
Philipp Reisner 已提交
3623 3624 3625

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
3626 3627 3628 3629
		if (drbd_new_dev_size(device, device->ldev, p_usize, 0) <
		    drbd_get_capacity(device->this_bdev) &&
		    device->state.disk >= D_OUTDATED &&
		    device->state.conn < C_CONNECTED) {
3630
			drbd_err(device, "The peer's disk size is too small!\n");
3631
			conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3632
			put_ldev(device);
3633
			return -EIO;
P
Philipp Reisner 已提交
3634
		}
P
Philipp Reisner 已提交
3635 3636 3637 3638 3639 3640

		if (my_usize != p_usize) {
			struct disk_conf *old_disk_conf, *new_disk_conf = NULL;

			new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
			if (!new_disk_conf) {
3641
				drbd_err(device, "Allocation of new disk_conf failed\n");
3642
				put_ldev(device);
P
Philipp Reisner 已提交
3643 3644 3645
				return -ENOMEM;
			}

3646
			mutex_lock(&connection->resource->conf_update);
3647
			old_disk_conf = device->ldev->disk_conf;
P
Philipp Reisner 已提交
3648 3649 3650
			*new_disk_conf = *old_disk_conf;
			new_disk_conf->disk_size = p_usize;

3651
			rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3652
			mutex_unlock(&connection->resource->conf_update);
P
Philipp Reisner 已提交
3653 3654 3655
			synchronize_rcu();
			kfree(old_disk_conf);

3656
			drbd_info(device, "Peer sets u_size to %lu sectors\n",
P
Philipp Reisner 已提交
3657
				 (unsigned long)my_usize);
P
Philipp Reisner 已提交
3658
		}
P
Philipp Reisner 已提交
3659

3660
		put_ldev(device);
P
Philipp Reisner 已提交
3661 3662
	}

3663
	ddsf = be16_to_cpu(p->dds_flags);
3664 3665 3666
	if (get_ldev(device)) {
		dd = drbd_determine_dev_size(device, ddsf, NULL);
		put_ldev(device);
3667
		if (dd == DS_ERROR)
3668
			return -EIO;
3669
		drbd_md_sync(device);
P
Philipp Reisner 已提交
3670 3671
	} else {
		/* I am diskless, need to accept the peer's size. */
3672
		drbd_set_my_capacity(device, p_size);
P
Philipp Reisner 已提交
3673 3674
	}

3675 3676
	device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(device);
3677

3678 3679 3680
	if (get_ldev(device)) {
		if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
			device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
P
Philipp Reisner 已提交
3681 3682 3683
			ldsc = 1;
		}

3684
		put_ldev(device);
P
Philipp Reisner 已提交
3685 3686
	}

3687
	if (device->state.conn > C_WF_REPORT_PARAMS) {
P
Philipp Reisner 已提交
3688
		if (be64_to_cpu(p->c_size) !=
3689
		    drbd_get_capacity(device->this_bdev) || ldsc) {
P
Philipp Reisner 已提交
3690 3691
			/* we have different sizes, probably peer
			 * needs to know my new size... */
3692
			drbd_send_sizes(peer_device, 0, ddsf);
P
Philipp Reisner 已提交
3693
		}
3694 3695 3696 3697
		if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
		    (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
			if (device->state.pdsk >= D_INCONSISTENT &&
			    device->state.disk >= D_INCONSISTENT) {
3698
				if (ddsf & DDSF_NO_RESYNC)
3699
					drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
3700
				else
3701
					resync_after_online_grow(device);
3702
			} else
3703
				set_bit(RESYNC_AFTER_NEG, &device->flags);
P
Philipp Reisner 已提交
3704 3705 3706
		}
	}

3707
	return 0;
P
Philipp Reisner 已提交
3708 3709
}

/*
 * receive_uuids() - process the peer's UUID set (P_UUIDS).
 *
 * Stores the peer's UUIDs in device->p_uuid, refuses to connect to
 * mismatching data while we are a degraded primary, and handles the
 * "skip initial sync" handshake (both sides freshly created, peer set
 * the corresponding bit (mask 8) in UI_FLAGS).
 *
 * Returns 0 on success, -EIO on a current-UUID mismatch (forces
 * C_DISCONNECTING).
 */
static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_uuids *p = pi->data;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return config_unknown_volume(connection, pi);
	device = peer_device->device;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		drbd_err(device, "kmalloc of p_uuid failed\n");
		/* NOTE(review): false == 0, i.e. this reports success and keeps
		 * the previous device->p_uuid; looks like deliberate best-effort
		 * rather than tearing down the connection — confirm. */
		return false;
	}

	/* Convert all announced UUIDs to host byte order. */
	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(device->p_uuid);
	device->p_uuid = p_uuid;

	/* A degraded primary (no local disk) may only (re)connect to data
	 * carrying the current UUID it was exposing (ignoring bit 0). */
	if (device->state.conn < C_CONNECTED &&
	    device->state.disk < D_INCONSISTENT &&
	    device->state.role == R_PRIMARY &&
	    (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)device->ed_uuid);
		conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (get_ldev(device)) {
		int skip_initial_sync =
			device->state.conn == C_CONNECTED &&
			peer_device->connection->agreed_pro_version >= 90 &&
			device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(device, UI_BITMAP, 0);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(device);
			updated_uuids = 1;
		}
		put_ldev(device);
	} else if (device->state.disk < D_INCONSISTENT &&
		   device->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until an eventually
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	mutex_lock(device->state_mutex);
	mutex_unlock(device->state_mutex);
	if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(device, "receiver updated UUIDs to");

	return 0;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
3795
		[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
P
Philipp Reisner 已提交
3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]       = C_VERIFY_T,
		[C_MASK]   = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}

3817
static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
3818
{
3819
	struct drbd_peer_device *peer_device;
3820
	struct drbd_device *device;
3821
	struct p_req_state *p = pi->data;
P
Philipp Reisner 已提交
3822
	union drbd_state mask, val;
3823
	enum drbd_state_rv rv;
P
Philipp Reisner 已提交
3824

3825 3826
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
3827
		return -EIO;
3828
	device = peer_device->device;
3829

P
Philipp Reisner 已提交
3830 3831 3832
	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

3833
	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
3834
	    mutex_is_locked(device->state_mutex)) {
3835
		drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
3836
		return 0;
P
Philipp Reisner 已提交
3837 3838 3839 3840 3841
	}

	mask = convert_state(mask);
	val = convert_state(val);

3842
	rv = drbd_change_state(device, CS_VERBOSE, mask, val);
3843
	drbd_send_sr_reply(peer_device, rv);
P
Philipp Reisner 已提交
3844

3845
	drbd_md_sync(device);
P
Philipp Reisner 已提交
3846

3847
	return 0;
P
Philipp Reisner 已提交
3848 3849
}

3850
static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
3851
{
3852
	struct p_req_state *p = pi->data;
P
Philipp Reisner 已提交
3853
	union drbd_state mask, val;
3854
	enum drbd_state_rv rv;
P
Philipp Reisner 已提交
3855 3856 3857 3858

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

3859 3860 3861
	if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
	    mutex_is_locked(&connection->cstate_mutex)) {
		conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
3862
		return 0;
P
Philipp Reisner 已提交
3863 3864 3865 3866 3867
	}

	mask = convert_state(mask);
	val = convert_state(val);

3868 3869
	rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
	conn_send_sr_reply(connection, rv);
P
Philipp Reisner 已提交
3870

3871
	return 0;
P
Philipp Reisner 已提交
3872 3873
}

/*
 * receive_state() - process a P_STATE packet announcing the peer's state.
 *
 * Derives the peer's effective disk state, decides whether a resync
 * handshake is needed, and folds the peer's state into our own via
 * _drbd_set_state(), retrying if the local state changed concurrently.
 *
 * Returns 0 on success, -ECONNRESET if the connection is already being
 * torn down, -EIO on fatal disagreement (forces C_DISCONNECTING or
 * C_PROTOCOL_ERROR).
 */
static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_state *p = pi->data;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return config_unknown_volume(connection, pi);
	device = peer_device->device;

	peer_state.i = be32_to_cpu(p->state);

	/* While the peer is still attaching, judge its disk from the
	 * inconsistent bit in the UUID flags it sent us. */
	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&device->resource->req_lock);
 retry:
	os = ns = drbd_read_state(device);
	spin_unlock_irq(&device->resource->req_lock);

	/* If some other part of the code (asender thread, timeout)
	 * already decided to close the connection again,
	 * we must not "re-establish" it here. */
	if (os.conn <= C_TEAR_DOWN)
		return -ECONNRESET;

	/* If this is the "end of sync" confirmation, usually the peer disk
	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
	 * set) resync started in PausedSyncT, or if the timing of pause-/
	 * unpause-sync events has been "just right", the peer disk may
	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
	 */
	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
	    real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(device) <= device->rs_failed)
				drbd_resync_finished(device);
			return 0;
		}
	}

	/* explicit verify finished notification, stop sector reached. */
	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
		ov_out_of_sync_print(device);
		drbd_resync_finished(device);
		return 0;
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	/* Decide whether a resync handshake is called for. */
	if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(device, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &device->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
				(peer_state.conn >= C_STARTING_SYNC_S &&
				 peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);

		put_ldev(device);
		/* C_MASK from the handshake means "no agreement". */
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (device->state.disk == D_NEGOTIATING) {
				drbd_force_state(device, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				drbd_err(device, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
					return -EIO;
				D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
				conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
				return -EIO;
			}
		}
	}

	spin_lock_irq(&device->resource->req_lock);
	/* Re-check under the lock; redo the evaluation if the local state
	 * changed while we were not holding it. */
	if (os.i != drbd_read_state(device).i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &device->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = device->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &device->flags)) {
		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
		   for temporal network outages! */
		spin_unlock_irq(&device->resource->req_lock);
		drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(peer_device->connection);
		drbd_uuid_new_current(device);
		clear_bit(NEW_CUR_UUID, &device->flags);
		conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
		return -EIO;
	}
	rv = _drbd_set_state(device, ns, cs_flags, NULL);
	ns = drbd_read_state(device);
	spin_unlock_irq(&device->resource->req_lock);

	if (rv < SS_SUCCESS) {
		conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING ) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(peer_device);
			drbd_send_current_state(peer_device);
		}
	}

	clear_bit(DISCARD_MY_DATA, &device->flags);

	drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */

	return 0;
}

4052
static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
4053
{
4054
	struct drbd_peer_device *peer_device;
4055
	struct drbd_device *device;
4056
	struct p_rs_uuid *p = pi->data;
4057

4058 4059
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
4060
		return -EIO;
4061
	device = peer_device->device;
P
Philipp Reisner 已提交
4062

4063 4064 4065 4066 4067
	wait_event(device->misc_wait,
		   device->state.conn == C_WF_SYNC_UUID ||
		   device->state.conn == C_BEHIND ||
		   device->state.conn < C_CONNECTED ||
		   device->state.disk < D_NEGOTIATING);
P
Philipp Reisner 已提交
4068

4069
	/* D_ASSERT(device,  device->state.conn == C_WF_SYNC_UUID ); */
P
Philipp Reisner 已提交
4070 4071 4072

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
4073 4074 4075
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		_drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(device, UI_BITMAP, 0UL);
P
Philipp Reisner 已提交
4076

4077 4078
		drbd_print_uuids(device, "updated sync uuid");
		drbd_start_resync(device, C_SYNC_TARGET);
P
Philipp Reisner 已提交
4079

4080
		put_ldev(device);
P
Philipp Reisner 已提交
4081
	} else
4082
		drbd_err(device, "Ignoring SyncUUID packet!\n");
P
Philipp Reisner 已提交
4083

4084
	return 0;
P
Philipp Reisner 已提交
4085 4086
}

4087 4088 4089 4090 4091 4092 4093
/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
4094
receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
4095
		     unsigned long *p, struct bm_xfer_ctx *c)
P
Philipp Reisner 已提交
4096
{
4097
	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4098
				 drbd_header_size(peer_device->connection);
4099
	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4100
				       c->bm_words - c->word_offset);
4101
	unsigned int want = num_words * sizeof(*p);
4102
	int err;
P
Philipp Reisner 已提交
4103

4104
	if (want != size) {
4105
		drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
4106
		return -EIO;
P
Philipp Reisner 已提交
4107 4108
	}
	if (want == 0)
4109
		return 0;
4110
	err = drbd_recv_all(peer_device->connection, p, want);
4111
	if (err)
4112
		return err;
P
Philipp Reisner 已提交
4113

4114
	drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
P
Philipp Reisner 已提交
4115 4116 4117 4118 4119 4120

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

4121
	return 1;
P
Philipp Reisner 已提交
4122 4123
}

4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138
static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

/* Bit 7 of the encoding byte, normalized to 0/1. */
static int dcbp_get_start(struct p_compressed_bm *p)
{
	return !!(p->encoding & 0x80);
}

/* Bits 4..6 of the encoding byte (the pad-bit count field). */
static int dcbp_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding & 0x70) >> 4;
}

4139 4140 4141 4142 4143 4144 4145
/**
 * recv_bm_rle_bits - decode one VLI/RLE compressed bitmap packet
 *
 * Decodes run-length encoded runs from the packet's bitstream and sets the
 * corresponding bit ranges in the local bitmap.  Runs alternate between
 * "clear" and "set"; only set runs touch the bitmap.  Decoder state
 * (bit offset) is carried across packets in @c.
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_peer_device *peer_device,
		struct p_compressed_bm *p,
		 struct bm_xfer_ctx *c,
		 unsigned int len)
{
	struct bitstream bs;
	u64 look_ahead;	/* sliding window of up to 64 not-yet-decoded bits */
	u64 rl;		/* decoded run length */
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int toggle = dcbp_get_start(p);
	int have;	/* number of valid bits currently in look_ahead */
	int bits;

	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));

	/* prime the look-ahead window */
	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl -1;
			/* a run must never extend past the bitmap */
			if (e >= c->bm_bits) {
				drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(peer_device->device, s, e);
		}

		/* a code longer than the remaining window means corrupt input */
		if (have < bits) {
			drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
		if (likely(bits < 64))
			look_ahead >>= bits;
		else
			look_ahead = 0;
		have -= bits;

		/* refill the look-ahead window from the bitstream */
		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	/* done only once the decode position reached the end of the bitmap */
	return (s != c->bm_bits);
}

4208 4209 4210 4211 4212 4213 4214
/**
 * decode_bitmap_c - dispatch decoding of a compressed bitmap packet
 *
 * Only the RLE_VLI_Bits encoding is supported; any other encoding value
 * from the peer is a protocol error and tears down the connection.
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_peer_device *peer_device,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{
	if (dcbp_get_code(p) == RLE_VLI_Bits)
		/* the bitstream starts right after the fixed packet header */
		return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
	return -EIO;
}

4232
/* Log compression statistics for a finished bitmap transfer, comparing the
 * bytes actually moved against what a plain (uncompressed) transfer would
 * have cost.  Silent when nothing was saved. */
void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
	unsigned int plain =
		header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
		c->bm_words * sizeof(unsigned long);
	unsigned int total = c->bytes[0] + c->bytes[1];
	unsigned int r;	/* savings in tenths of a percent */

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
		                    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}

/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter if the process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we would use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct bm_xfer_ctx c;
	int err;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	/* loop over incoming packets until the whole bitmap has arrived */
	for(;;) {
		if (pi->cmd == P_BITMAP)
			err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
		else if (pi->cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p = pi->data;

			/* untrusted length: must fit the preallocated buffer */
			if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
				drbd_err(device, "ReportCBitmap packet too large\n");
				err = -EIO;
				goto out;
			}
			if (pi->size <= sizeof(*p)) {
				drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
				err = -EIO;
				goto out;
			}
			err = drbd_recv_all(peer_device->connection, p, pi->size);
			if (err)
			       goto out;
			err = decode_bitmap_c(peer_device, p, &c, pi->size);
		} else {
			drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
			err = -EIO;
			goto out;
		}

		/* per-kind transfer accounting for INFO_bm_xfer_stats() */
		c.packets[pi->cmd == P_BITMAP]++;
		c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;

		/* 0 = done, <0 = error, >0 = need another packet */
		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		err = drbd_recv_header(peer_device->connection, pi);
		if (err)
			goto out;
	}

	INFO_bm_xfer_stats(device, "receive", &c);

	if (device->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		/* as bitmap target, answer with our own bitmap first */
		err = drbd_send_bitmap(device);
		if (err)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(device, rv == SS_SUCCESS);
	} else if (device->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(device->state.conn));
	}
	err = 0;

 out:
	drbd_bm_unlock(device);
	/* as bitmap source, kick off the resync once the exchange succeeded */
	if (!err && device->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(device, C_SYNC_SOURCE);
	return err;
}

4364
static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
4365
{
4366
	drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
4367
		 pi->cmd, pi->size);
P
Philipp Reisner 已提交
4368

4369
	return ignore_remaining_packet(connection, pi);
P
Philipp Reisner 已提交
4370 4371
}

4372
static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
4373
{
4374 4375
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
4376
	drbd_tcp_quickack(connection->data.socket);
4377

4378
	return 0;
4379 4380
}

4381
/* Handle a P_OUT_OF_SYNC packet: mark the described block range as
 * out-of-sync in the local bitmap.  Only sanity-checks the connection
 * state with a log message; the bits are set regardless. */
static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_desc *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	/* this packet is only expected in these states; warn otherwise */
	switch (device->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
			break;
	default:
		drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
				drbd_conn_str(device->state.conn));
	}

	drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return 0;
}

4407 4408 4409
/* Dispatch entry for one data-socket packet type. */
struct data_cmd {
	int expect_payload;	/* nonzero: packet may carry extra payload beyond pkt_size */
	size_t pkt_size;	/* fixed sub-header size to receive before calling fn */
	int (*fn)(struct drbd_connection *, struct packet_info *);
};

/* Packet-type -> handler table for the data socket, indexed by command
 * number (validated against ARRAY_SIZE in drbdd()). */
static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply } ,
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier } ,
	[P_BITMAP]	    = { 1, 0, receive_bitmap } ,
	[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
	[P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, 0, receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
	[P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
	[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};

4440
static void drbdd(struct drbd_connection *connection)
P
Philipp Reisner 已提交
4441
{
4442
	struct packet_info pi;
4443
	size_t shs; /* sub header size */
4444
	int err;
P
Philipp Reisner 已提交
4445

4446
	while (get_t_state(&connection->receiver) == RUNNING) {
4447
		struct data_cmd *cmd;
P
Philipp Reisner 已提交
4448

4449 4450
		drbd_thread_current_set_cpu(&connection->receiver);
		if (drbd_recv_header(connection, &pi))
4451
			goto err_out;
P
Philipp Reisner 已提交
4452

4453
		cmd = &drbd_cmd_handler[pi.cmd];
4454
		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4455
			drbd_err(connection, "Unexpected data packet %s (0x%04x)",
4456
				 cmdname(pi.cmd), pi.cmd);
4457
			goto err_out;
4458
		}
P
Philipp Reisner 已提交
4459

4460 4461
		shs = cmd->pkt_size;
		if (pi.size > shs && !cmd->expect_payload) {
4462
			drbd_err(connection, "No payload expected %s l:%d\n",
4463
				 cmdname(pi.cmd), pi.size);
4464
			goto err_out;
P
Philipp Reisner 已提交
4465 4466
		}

4467
		if (shs) {
4468
			err = drbd_recv_all_warn(connection, pi.data, shs);
4469
			if (err)
4470
				goto err_out;
4471
			pi.size -= shs;
4472 4473
		}

4474
		err = cmd->fn(connection, &pi);
4475
		if (err) {
4476
			drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
4477
				 cmdname(pi.cmd), err, pi.size);
4478
			goto err_out;
P
Philipp Reisner 已提交
4479 4480
		}
	}
4481
	return;
P
Philipp Reisner 已提交
4482

4483
    err_out:
4484
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
P
Philipp Reisner 已提交
4485 4486
}

4487
/* Tear down a lost connection: stop the asender, close the sockets, run
 * per-volume cleanup via drbd_disconnected(), and transition the
 * connection state down to C_UNCONNECTED (or C_STANDALONE if a
 * disconnect was requested). */
static void conn_disconnect(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	enum drbd_conns oc;
	int vnr;

	if (connection->cstate == C_STANDALONE)
		return;

	/* We are about to start the cleanup after connection loss.
	 * Make sure drbd_make_request knows about that.
	 * Usually we should be in some network failure state already,
	 * but just in case we are not, we fix it up here.
	 */
	conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&connection->asender);
	drbd_free_sock(connection);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		/* hold a ref so the device survives while we drop the RCU
		 * read lock for the (sleeping) per-device cleanup */
		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_disconnected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	if (!list_empty(&connection->current_epoch->list))
		drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&connection->current_epoch->epoch_size, 0);
	connection->send.seen_any_write_yet = false;

	drbd_info(connection, "Connection closed\n");

	/* fence the peer if we are Primary and its disk state is unknown */
	if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
		conn_try_outdate_peer_async(connection);

	spin_lock_irq(&connection->resource->req_lock);
	oc = connection->cstate;
	if (oc >= C_UNCONNECTED)
		_conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	spin_unlock_irq(&connection->resource->req_lock);

	if (oc == C_DISCONNECTING)
		conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
}

4540
/* Per-volume cleanup after connection loss: drain in-flight peer requests,
 * cancel resync bookkeeping, clear the transfer log, and release pages
 * still referenced by the network stack.  Always returns 0. */
static int drbd_disconnected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, &device->active_ee);
	_drbd_wait_ee_list_empty(device, &device->sync_ee);
	_drbd_wait_ee_list_empty(device, &device->read_ee);
	spin_unlock_irq(&device->resource->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(device);
	device->rs_total = 0;
	device->rs_failed = 0;
	atomic_set(&device->rs_pending_cnt, 0);
	wake_up(&device->misc_wait);

	del_timer_sync(&device->resync_timer);
	resync_timer_fn((unsigned long)device);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(&peer_device->connection->sender_work);

	drbd_finish_peer_reqs(device);

	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
	   might have issued a work again. The one before drbd_finish_peer_reqs() is
	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
	drbd_flush_workqueue(&peer_device->connection->sender_work);

	/* need to do it again, drbd_finish_peer_reqs() may have populated it
	 * again via drbd_try_clear_on_disk_bm(). */
	drbd_rs_cancel_all(device);

	kfree(device->p_uuid);
	device->p_uuid = NULL;

	if (!drbd_suspended(device))
		tl_clear(peer_device->connection);

	drbd_md_sync(device);

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_free_peer_reqs(device, &device->net_ee);
	if (i)
		drbd_info(device, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&device->pp_in_use_by_net);
	if (i)
		drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&device->pp_in_use);
	if (i)
		drbd_info(device, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(device, list_empty(&device->read_ee));
	D_ASSERT(device, list_empty(&device->active_ee));
	D_ASSERT(device, list_empty(&device->sync_ee));
	D_ASSERT(device, list_empty(&device->done_ee));

	return 0;
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
4633
static int drbd_send_features(struct drbd_connection *connection)
P
Philipp Reisner 已提交
4634
{
4635 4636
	struct drbd_socket *sock;
	struct p_connection_features *p;
P
Philipp Reisner 已提交
4637

4638 4639
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
4640
	if (!p)
4641
		return -EIO;
P
Philipp Reisner 已提交
4642 4643 4644
	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4645
	return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
P
Philipp Reisner 已提交
4646 4647 4648 4649 4650 4651 4652 4653 4654
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_connection *connection)
{
	/* ASSERT current == connection->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(connection);
	if (err)
		return 0;

	err = drbd_recv_header(connection, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
		     expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(connection, p, expect);
	if (err)
		return 0;

	/* convert in place; a peer sending max == 0 only speaks one version */
	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	/* the version ranges must overlap */
	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	/* agree on the highest version both sides support */
	connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	drbd_info(connection, "Handshake successful: "
	     "Agreed network protocol version %d\n", connection->agreed_pro_version);

	return 1;

 incompat:
	drbd_err(connection, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
/* Stub used when the kernel lacks HMAC support: authentication can never
 * succeed, so configuring a shared secret is a configuration error. */
static int drbd_do_auth(struct drbd_connection *connection)
{
	drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
	drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

/* Mutual challenge/response authentication over the data socket using the
 * configured CRAM-HMAC transform keyed with the shared secret:
 * we send our challenge, HMAC the peer's challenge back to it, then verify
 * the peer's HMAC of our own challenge. */
static int drbd_do_auth(struct drbd_connection *connection)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer.  */

	/* copy the secret out under RCU; net_conf may change concurrently */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = connection->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &connection->data;
	if (!conn_prepare_command(connection, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(connection, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_CHALLENGE) {
		drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	/* bound the peer-supplied challenge length before allocating */
	if (pi.size > CHALLENGE_LEN * 2) {
		drbd_err(connection, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		drbd_err(connection, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	err = drbd_recv_all_warn(connection, peers_ch, pi.size);
	if (err) {
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		drbd_err(connection, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	/* HMAC the peer's challenge and send it back as our response */
	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(connection, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(connection, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_RESPONSE) {
		drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size != resp_size) {
		drbd_err(connection, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	err = drbd_recv_all_warn(connection, response , resp_size);
	if (err) {
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		drbd_err(connection, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	/* compute the HMAC of our own challenge and compare with the
	 * peer's answer */
	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
		     resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif

4891
int drbd_receiver(struct drbd_thread *thi)
P
Philipp Reisner 已提交
4892
{
4893
	struct drbd_connection *connection = thi->connection;
P
Philipp Reisner 已提交
4894 4895
	int h;

4896
	drbd_info(connection, "receiver (re)started\n");
P
Philipp Reisner 已提交
4897 4898

	do {
4899
		h = conn_connect(connection);
P
Philipp Reisner 已提交
4900
		if (h == 0) {
4901
			conn_disconnect(connection);
4902
			schedule_timeout_interruptible(HZ);
P
Philipp Reisner 已提交
4903 4904
		}
		if (h == -1) {
4905
			drbd_warn(connection, "Discarding network configuration.\n");
4906
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
P
Philipp Reisner 已提交
4907 4908 4909
		}
	} while (h == 0);

4910
	if (h > 0)
4911
		drbdd(connection);
P
Philipp Reisner 已提交
4912

4913
	conn_disconnect(connection);
P
Philipp Reisner 已提交
4914

4915
	drbd_info(connection, "receiver terminated\n");
P
Philipp Reisner 已提交
4916 4917 4918 4919 4920
	return 0;
}

/* ********* acknowledge sender ******** */

4921
static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
4922
{
4923
	struct p_req_state_reply *p = pi->data;
4924 4925 4926
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
4927
		set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
4928
	} else {
4929
		set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
4930
		drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
4931 4932
			 drbd_set_st_err_str(retcode), retcode);
	}
4933
	wake_up(&connection->ping_wait);
4934

4935
	return 0;
4936
}
P
Philipp Reisner 已提交
4937

4938
/* Record the peer's verdict on a per-device state change request.  For
 * pre-protocol-100 peers a connection-wide request is answered on this
 * path too and is forwarded to got_conn_RqSReply(). */
static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
		D_ASSERT(device, connection->agreed_pro_version < 100);
		return got_conn_RqSReply(connection, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &device->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &device->flags);
		drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&device->state_wait);

	return 0;
}

4967
/* Answer a keep-alive ping from the peer with a ping-ack. */
static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
{
	return drbd_send_ping_ack(connection);
}

4973
/* Peer answered our ping: restore the normal receive timeout on the meta
 * socket and wake any waiter blocked on the ping round-trip. */
static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
{
	/* restore idle timeout */
	connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
		wake_up(&connection->ping_wait);

	return 0;
}

4983
/* Checksum-based resync: the peer reports a block that turned out to be
 * identical.  Mark it in sync locally and account it as resync progress. */
static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	/* P_RS_IS_IN_SYNC only exists since protocol 89 */
	D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	if (get_ldev(device)) {
		drbd_rs_complete_io(device, sector);
		drbd_set_in_sync(device, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(device);
	}
	dec_rs_pending(device);
	/* blksize >> 9: bytes to 512-byte sectors */
	atomic_add(blksize >> 9, &device->rs_sect_in);

	return 0;
}

5013
/*
 * Look up the request identified by @id/@sector in @root (the device's
 * read or write request tree) and feed the event @what into the request
 * state machine.  Used by the asender for all per-request peer acks.
 *
 * @missing_ok: a request that is no longer in the tree is tolerated
 * (e.g. protocol A, where the master bio may complete before a
 * P_NEG_ACK arrives).
 *
 * Returns 0 on success, -EIO when the request was not found and
 * @missing_ok is false.
 */
static int
validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&device->resource->req_lock);
	req = find_request(device, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&device->resource->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&device->resource->req_lock);

	/* completing the master bio happens outside of req_lock */
	if (m.bio)
		complete_master_bio(device, &m);
	return 0;
}

5035
static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
5036
{
5037
	struct drbd_peer_device *peer_device;
5038
	struct drbd_device *device;
5039
	struct p_block_ack *p = pi->data;
P
Philipp Reisner 已提交
5040 5041 5042 5043
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

5044 5045
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
5046
		return -EIO;
5047
	device = peer_device->device;
5048

5049
	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
P
Philipp Reisner 已提交
5050

5051
	if (p->block_id == ID_SYNCER) {
5052 5053
		drbd_set_in_sync(device, sector, blksize);
		dec_rs_pending(device);
5054
		return 0;
P
Philipp Reisner 已提交
5055
	}
5056
	switch (pi->cmd) {
P
Philipp Reisner 已提交
5057
	case P_RS_WRITE_ACK:
5058
		what = WRITE_ACKED_BY_PEER_AND_SIS;
P
Philipp Reisner 已提交
5059 5060
		break;
	case P_WRITE_ACK:
5061
		what = WRITE_ACKED_BY_PEER;
P
Philipp Reisner 已提交
5062 5063
		break;
	case P_RECV_ACK:
5064
		what = RECV_ACKED_BY_PEER;
P
Philipp Reisner 已提交
5065
		break;
5066 5067
	case P_SUPERSEDED:
		what = CONFLICT_RESOLVED;
P
Philipp Reisner 已提交
5068
		break;
5069 5070
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
P
Philipp Reisner 已提交
5071 5072
		break;
	default:
5073
		BUG();
P
Philipp Reisner 已提交
5074 5075
	}

5076 5077
	return validate_req_change_req_state(device, p->block_id, sector,
					     &device->write_requests, __func__,
5078
					     what, false);
P
Philipp Reisner 已提交
5079 5080
}

5081
static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
5082
{
5083
	struct drbd_peer_device *peer_device;
5084
	struct drbd_device *device;
5085
	struct p_block_ack *p = pi->data;
P
Philipp Reisner 已提交
5086
	sector_t sector = be64_to_cpu(p->sector);
5087
	int size = be32_to_cpu(p->blksize);
5088
	int err;
P
Philipp Reisner 已提交
5089

5090 5091
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
5092
		return -EIO;
5093
	device = peer_device->device;
P
Philipp Reisner 已提交
5094

5095
	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
P
Philipp Reisner 已提交
5096

5097
	if (p->block_id == ID_SYNCER) {
5098 5099
		dec_rs_pending(device);
		drbd_rs_failed_io(device, sector, size);
5100
		return 0;
P
Philipp Reisner 已提交
5101
	}
5102

5103 5104
	err = validate_req_change_req_state(device, p->block_id, sector,
					    &device->write_requests, __func__,
5105
					    NEG_ACKED, true);
5106
	if (err) {
5107 5108 5109 5110 5111
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
5112
		drbd_set_out_of_sync(device, sector, size);
5113
	}
5114
	return 0;
P
Philipp Reisner 已提交
5115 5116
}

5117
static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
P
Philipp Reisner 已提交
5118
{
5119
	struct drbd_peer_device *peer_device;
5120
	struct drbd_device *device;
5121
	struct p_block_ack *p = pi->data;
P
Philipp Reisner 已提交
5122 5123
	sector_t sector = be64_to_cpu(p->sector);

5124 5125
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
5126
		return -EIO;
5127
	device = peer_device->device;
5128

5129
	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5130

5131
	drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
P
Philipp Reisner 已提交
5132 5133
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

5134 5135
	return validate_req_change_req_state(device, p->block_id, sector,
					     &device->read_requests, __func__,
5136
					     NEG_ACKED, false);
P
Philipp Reisner 已提交
5137 5138
}

5139
/*
 * Handle P_NEG_RS_DREPLY (peer could not serve a resync read request)
 * and P_RS_CANCEL (peer cancels a resync request).  Both drop the
 * pending-resync count and complete the resync I/O; only the former
 * additionally marks the range as failed resync I/O.
 */
static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	dec_rs_pending(device);

	/* D_FAILED is enough: we only touch resync bookkeeping, not data */
	if (get_ldev_if_state(device, D_FAILED)) {
		drbd_rs_complete_io(device, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(device, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(device);
	}

	return 0;
}

5175
/*
 * P_BARRIER_ACK: the peer has finished a whole epoch.  Release the
 * corresponding section of the transfer log, and for any volume that
 * is in C_AHEAD with no application I/O in flight, arm the timer that
 * transitions it back to resync (Ahead -> SyncSource).
 */
static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_peer_device *peer_device;
	int vnr;

	tl_release(connection, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		/* test_and_set_bit ensures the timer is armed only once */
		if (device->state.conn == C_AHEAD &&
		    atomic_read(&device->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
			device->start_resync_timer.expires = jiffies + HZ;
			add_timer(&device->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}

5199
/*
 * P_OV_RESULT: the peer reports the outcome for one online-verify
 * block.  Record out-of-sync findings, update progress, and when the
 * last block has been verified, queue w_ov_finished on the sender
 * work queue (falling back to finishing synchronously if the work
 * item cannot be allocated).
 */
static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	struct drbd_device_work *dw;
	sector_t sector;
	int size;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(device, sector, size);
	else
		/* end of an out-of-sync run: print the accumulated range */
		ov_out_of_sync_print(device);

	if (!get_ldev(device))
		return 0;

	drbd_rs_complete_io(device, sector);
	dec_rs_pending(device);

	--device->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((device->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(device, device->ov_left);

	if (device->ov_left == 0) {
		dw = kmalloc(sizeof(*dw), GFP_NOIO);
		if (dw) {
			dw->w.cb = w_ov_finished;
			dw->device = device;
			drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
		} else {
			/* no memory for the work item: finish inline */
			drbd_err(device, "kmalloc(dw) failed.");
			ov_out_of_sync_print(device);
			drbd_resync_finished(device);
		}
	}
	put_ldev(device);
	return 0;
}

5251
/* Handler for meta packets we deliberately ignore (e.g. P_DELAY_PROBE). */
static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
{
	return 0;
}

5256
/*
 * Flush the done_ee lists of all volumes of this connection: call
 * drbd_finish_peer_reqs() for each device, and repeat until no device
 * has pending entries left.  The device kref is taken around the
 * (sleeping) drbd_finish_peer_reqs() call so the RCU read lock can be
 * dropped meanwhile.  SIGNAL_ASENDER is cleared while working and set
 * again before re-checking, so a concurrent wake-up is not lost.
 *
 * Returns 0 when all lists are empty, 1 if finishing requests failed
 * for some device.
 */
static int connection_finish_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &connection->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;
			kref_get(&device->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(device)) {
				kref_put(&device->kref, drbd_destroy_device);
				return 1;
			}
			kref_put(&device->kref, drbd_destroy_device);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &connection->flags);

		/* re-check under req_lock whether any done_ee refilled meanwhile */
		spin_lock_irq(&connection->resource->req_lock);
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;
			not_empty = !list_empty(&device->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&connection->resource->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}

P
Philipp Reisner 已提交
5293 5294
/* Dispatch table entry for packets received on the meta (asender) socket. */
struct asender_cmd {
	size_t pkt_size;	/* expected payload size after the header; 0 for bare packets */
	int (*fn)(struct drbd_connection *connection, struct packet_info *);
};

5298
static struct asender_cmd asender_tbl[] = {
5299 5300
	[P_PING]	    = { 0, got_Ping },
	[P_PING_ACK]	    = { 0, got_PingAck },
P
Philipp Reisner 已提交
5301 5302 5303
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5304
	[P_SUPERSEDED]   = { sizeof(struct p_block_ack), got_BlockAck },
P
Philipp Reisner 已提交
5305 5306
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
5307
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
P
Philipp Reisner 已提交
5308 5309 5310 5311
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5312
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5313 5314 5315
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
5316
};
P
Philipp Reisner 已提交
5317 5318 5319

int drbd_asender(struct drbd_thread *thi)
{
5320
	struct drbd_connection *connection = thi->connection;
P
Philipp Reisner 已提交
5321
	struct asender_cmd *cmd = NULL;
5322
	struct packet_info pi;
5323
	int rv;
5324
	void *buf    = connection->meta.rbuf;
P
Philipp Reisner 已提交
5325
	int received = 0;
5326
	unsigned int header_size = drbd_header_size(connection);
5327
	int expect   = header_size;
5328 5329
	bool ping_timeout_active = false;
	struct net_conf *nc;
5330
	int ping_timeo, tcp_cork, ping_int;
P
Philipp Reisner 已提交
5331
	struct sched_param param = { .sched_priority = 2 };
P
Philipp Reisner 已提交
5332

P
Philipp Reisner 已提交
5333 5334
	rv = sched_setscheduler(current, SCHED_RR, &param);
	if (rv < 0)
5335
		drbd_err(connection, "drbd_asender: ERROR set priority, ret=%d\n", rv);
P
Philipp Reisner 已提交
5336

5337
	while (get_t_state(thi) == RUNNING) {
5338
		drbd_thread_current_set_cpu(thi);
P
Philipp Reisner 已提交
5339

5340
		rcu_read_lock();
5341
		nc = rcu_dereference(connection->net_conf);
5342
		ping_timeo = nc->ping_timeo;
5343
		tcp_cork = nc->tcp_cork;
5344 5345 5346
		ping_int = nc->ping_int;
		rcu_read_unlock();

5347 5348
		if (test_and_clear_bit(SEND_PING, &connection->flags)) {
			if (drbd_send_ping(connection)) {
5349
				drbd_err(connection, "drbd_send_ping has failed\n");
P
Philipp Reisner 已提交
5350
				goto reconnect;
5351
			}
5352
			connection->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5353
			ping_timeout_active = true;
P
Philipp Reisner 已提交
5354 5355
		}

5356 5357
		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
5358
		if (tcp_cork)
5359 5360
			drbd_tcp_cork(connection->meta.socket);
		if (connection_finish_peer_reqs(connection)) {
5361
			drbd_err(connection, "connection_finish_peer_reqs() failed\n");
5362
			goto reconnect;
P
Philipp Reisner 已提交
5363 5364
		}
		/* but unconditionally uncork unless disabled */
5365
		if (tcp_cork)
5366
			drbd_tcp_uncork(connection->meta.socket);
P
Philipp Reisner 已提交
5367 5368 5369 5370 5371

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

5372 5373
		rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &connection->flags);
P
Philipp Reisner 已提交
5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
5391
			if (test_bit(DISCONNECT_SENT, &connection->flags)) {
5392 5393
				long t;
				rcu_read_lock();
5394
				t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
5395 5396
				rcu_read_unlock();

5397 5398
				t = wait_event_timeout(connection->ping_wait,
						       connection->cstate < C_WF_REPORT_PARAMS,
5399
						       t);
5400 5401 5402
				if (t)
					break;
			}
5403
			drbd_err(connection, "meta connection shut down by peer.\n");
P
Philipp Reisner 已提交
5404 5405
			goto reconnect;
		} else if (rv == -EAGAIN) {
5406 5407
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
5408 5409
			if (time_after(connection->last_received,
				jiffies - connection->meta.socket->sk->sk_rcvtimeo))
5410
				continue;
5411
			if (ping_timeout_active) {
5412
				drbd_err(connection, "PingAck did not arrive in time.\n");
P
Philipp Reisner 已提交
5413 5414
				goto reconnect;
			}
5415
			set_bit(SEND_PING, &connection->flags);
P
Philipp Reisner 已提交
5416 5417 5418 5419
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
5420
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
P
Philipp Reisner 已提交
5421 5422 5423 5424
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
5425
			if (decode_header(connection, connection->meta.rbuf, &pi))
P
Philipp Reisner 已提交
5426
				goto reconnect;
5427
			cmd = &asender_tbl[pi.cmd];
5428
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
5429
				drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
5430
					 cmdname(pi.cmd), pi.cmd);
P
Philipp Reisner 已提交
5431 5432
				goto disconnect;
			}
5433
			expect = header_size + cmd->pkt_size;
5434
			if (pi.size != expect - header_size) {
5435
				drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
5436
					pi.cmd, pi.size);
P
Philipp Reisner 已提交
5437
				goto reconnect;
5438
			}
P
Philipp Reisner 已提交
5439 5440
		}
		if (received == expect) {
5441
			bool err;
5442

5443
			err = cmd->fn(connection, &pi);
5444
			if (err) {
5445
				drbd_err(connection, "%pf failed\n", cmd->fn);
P
Philipp Reisner 已提交
5446
				goto reconnect;
5447
			}
P
Philipp Reisner 已提交
5448

5449
			connection->last_received = jiffies;
5450

5451 5452
			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
5453
				connection->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5454 5455
				ping_timeout_active = false;
			}
5456

5457
			buf	 = connection->meta.rbuf;
P
Philipp Reisner 已提交
5458
			received = 0;
5459
			expect	 = header_size;
P
Philipp Reisner 已提交
5460 5461 5462 5463 5464 5465
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
5466 5467
		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
		conn_md_sync(connection);
P
Philipp Reisner 已提交
5468 5469 5470
	}
	if (0) {
disconnect:
5471
		conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
P
Philipp Reisner 已提交
5472
	}
5473
	clear_bit(SIGNAL_ASENDER, &connection->flags);
P
Philipp Reisner 已提交
5474

5475
	drbd_info(connection, "asender terminated\n");
P
Philipp Reisner 已提交
5476 5477 5478

	return 0;
}