lowcomms.c 44.8 KB
Newer Older
1 2 3 4
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
5
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
J
Joe Perches 已提交
24
 * be expanded for the cluster infrastructure then that is its
25 26 27 28 29 30 31 32 33 34 35 36 37 38
 * responsibility. It is this layer's
 * responsibility to resolve these into IP address or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
J
Joe Perches 已提交
39
 * lowcomms will choose to use either TCP or SCTP as its transport layer
40
 * depending on the configuration variable 'protocol'. This should be set
J
Joe Perches 已提交
41
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
42 43
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
44 45 46 47 48 49 50
 *
 */

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
51
#include <linux/file.h>
52
#include <linux/mutex.h>
53
#include <linux/sctp.h>
54
#include <linux/slab.h>
55
#include <net/sctp/sctp.h>
J
Joe Perches 已提交
56
#include <net/ipv6.h>
57 58 59 60 61 62

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

63
#define NEEDED_RMEM (4*1024*1024)
64
#define CONN_HASH_SIZE 32
65

66 67 68
/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

69
/* Circular-buffer bookkeeping for the per-connection receive page.
 * 'base' is the read offset, 'len' the number of valid bytes, and
 * 'mask' is size-1 (so the buffer size must be a power of two). */
struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

/* Account for 'n' freshly received bytes at the tail of the ring. */
static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

/* Offset (within the ring) of the first free byte, i.e. where the
 * next received data should land. */
static int cbuf_data(struct cbuf *cb)
{
	return (cb->base + cb->len) & cb->mask;
}

/* Reset the ring to empty; 'size' must be a power of two. */
static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = 0;
	cb->len = 0;
	cb->mask = size - 1;
}

/* Consume 'n' bytes from the head of the ring. */
static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base = (cb->base + n) & cb->mask;
}

/* True when no unconsumed data remains. */
static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
102 103 104 105

/* One of these exists per peer nodeid, plus one for nodeid 0 which is
 * the listening/base socket.  Entries live in connection_hash, keyed
 * by nodeid. */
struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;	/* serialises sock use/teardown */
	unsigned long flags;
#define CF_READ_PENDING 1	/* rwork queued or running */
#define CF_WRITE_PENDING 2	/* swork queued or running */
#define CF_CONNECT_PENDING 3	/* connect requested, not yet done */
#define CF_INIT_PENDING 4	/* SCTP association init in flight */
#define CF_IS_OTHERCON 5	/* this is a secondary (incoming) con */
#define CF_CLOSE 6		/* connection being shut down */
#define CF_APP_LIMITED 7	/* send path hit the socket sndbuf limit */
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;	/* buffer for partially received messages */
	struct cbuf cb;		/* ring bookkeeping for rx_page */
	int retries;		/* connect attempts so far */
#define MAX_CONNECT_RETRIES 3
	int sctp_assoc;		/* SCTP association id, 0 if none */
	struct hlist_node list;	/* link in a connection_hash bucket */
	struct connection *othercon;	/* second con when connections cross */
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
	bool try_new_addr;	/* rotate to the node's next address on retry */
};
/* The sk_user_data of our sockets points back at the owning connection. */
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;	/* link in connection->writequeue */
	struct page *page;	/* page holding the message data */
	int offset;		/* start of unsent data within the page */
	int len;		/* bytes still to be sent */
	int end;		/* end of used space; new data appends here */
	int users;		/* writers still composing into this entry */
	struct connection *con;	/* owning connection */
};

143 144 145 146
/* Per-node set of configured comms addresses.  The list and all
 * fields are protected by dlm_node_addrs_spin. */
struct dlm_node_addr {
	struct list_head list;	/* link in dlm_node_addrs */
	int nodeid;
	int addr_count;		/* valid entries in addr[] */
	int curr_addr_index;	/* address currently used for SCTP init */
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

154 155
static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
156
static int dlm_allow_conn;
157

158 159 160
/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;
161

162
static struct hlist_head connection_hash[CONN_HASH_SIZE];
163
static DEFINE_MUTEX(connections_lock);
P
Patrick Caulfield 已提交
164
static struct kmem_cache *con_cache;
165

166 167
static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
168

169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184

/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
	/* CONN_HASH_SIZE is a power of two, so masking == modulo. */
	return nodeid & (CONN_HASH_SIZE-1);
}

/* Look up an existing connection by nodeid; returns NULL if none.
 * NOTE(review): the __ prefix and the locked wrapper nodeid2con()
 * suggest callers hold connections_lock — confirm for all call sites. */
static struct connection *__find_con(int nodeid)
{
	int r;
	struct connection *con;

	r = nodeid_hash(nodeid);

	hlist_for_each_entry(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}
	return NULL;
}

192 193 194 195 196
/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	/* Publish into the hash before initialisation; callers are
	 * expected to hold connections_lock so this is not visible
	 * to concurrent lookups. */
	r = nodeid_hash(nodeid);
	hlist_add_head(&con->list, &connection_hash[r]);

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		/* nodeid 0 is the listening/base connection; inherit its
		 * protocol-specific actions. */
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	return con;
}

231 232 233 234
/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i;
	struct hlist_node *n;
	struct connection *con;

	for (i = 0; i < CONN_HASH_SIZE; i++) {
		/* _safe variant: conn_func may unhash/free con. */
		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
			conn_func(con);
	}
}

244 245 246 247
/* Locked front-end for __nodeid2con(): find — or, when 'allocation'
 * is non-zero, create — the connection for 'nodeid'. */
static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *ret;

	mutex_lock(&connections_lock);
	ret = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return ret;
}

255 256 257 258 259 260
/* This is a bit drastic, but only called when things go wrong */
/* Map an SCTP association id back to its connection by scanning every
 * hash bucket (there is no reverse index). */
static struct connection *assoc2con(int assoc_id)
{
	int i;
	struct connection *con;

	mutex_lock(&connections_lock);

	for (i = 0 ; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry(con, &connection_hash[i], list) {
			if (con->sctp_assoc == assoc_id) {
				mutex_unlock(&connections_lock);
				return con;
			}
		}
	}
	mutex_unlock(&connections_lock);
	return NULL;
}

275 276 277 278 279 280 281 282 283 284 285 286
/* Return the dlm_node_addr for 'nodeid', or NULL if unknown.
 * Caller must hold dlm_node_addrs_spin (as all callers in this
 * file do). */
static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
287
{
288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}

/* Fetch a comms address for 'nodeid'.  The address is copied into
 * sas_out (full sockaddr_storage) and/or sa_out (address part only,
 * port not set).  When try_new_addr is true, rotate to the node's
 * next configured address first (used when an SCTP init failed).
 * Returns 0, -1 if we have no local addresses yet, -EEXIST for an
 * unknown node or -ENOENT if the node has no addresses. */
static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		if (try_new_addr) {
			/* advance round-robin over the node's addresses */
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}

		/* snapshot under the lock; used below after unlock */
		memcpy(&sas, na->addr[na->curr_addr_index ],
			sizeof(struct sockaddr_storage));
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	/* NOTE(review): na->addr_count is re-read after the unlock;
	 * safe only because addresses are never removed — confirm. */
	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	/* Copy just the address portion, in the local family. */
	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}

361 362 363 364
/* Reverse lookup: find which node owns 'addr' (compared with port,
 * see addr_compare).  Stores the nodeid in *nodeid and returns 0,
 * or returns -EEXIST when no node matches. */
static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		/* a node may be reachable on any of its addresses */
		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}

426
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	/* CF_READ_PENDING ensures only one rwork is queued at a time. */
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
}

/* Socket has output space again: kick the send worker. */
static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	if (!con)
		return;

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	/* CF_APP_LIMITED is presumably set by the send path when the
	 * socket buffer filled (that code is outside this chunk) —
	 * undo its bookkeeping now that space is available. */
	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags);
	}

	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

/* Ask the send worker to (re)establish this connection, unless the
 * connection is being closed down. */
static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

/* Socket state changed: a freshly established connection has write
 * space by definition, so reuse the write_space path to start sending. */
static void lowcomms_state_change(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		lowcomms_write_space(sk);
}

466 467 468 469
/* Request a connection to 'nodeid'.  A no-op for SCTP (which connects
 * implicitly on first send) and for our own nodeid.  Returns 0 or
 * -ENOMEM if the connection structure could not be allocated. */
int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	/* with sctp there's no connecting without sending */
	if (dlm_config.ci_protocol != 0)
		return 0;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

484
/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	con->sock = sock;

	/* Install a data_ready callback */
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->sock->sk->sk_write_space = lowcomms_write_space;
	con->sock->sk->sk_state_change = lowcomms_state_change;
	/* let sock2con() find us from the sk */
	con->sock->sk->sk_user_data = con;
	con->sock->sk->sk_allocation = GFP_NOFS;
}

497
/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	/* Family follows whatever the local node is configured with. */
	saddr->ss_family =  dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	/* Zero the tail beyond the family-specific part so whole-struct
	 * comparisons of sockaddr_storage behave deterministically. */
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);

	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false);
	}
	/* Discard any partially received message. */
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
}

538 539 540
/* We only send shutdown messages to nodes that are not part of the cluster
 * or if we get multiple connections from a node.
 */
static void sctp_send_shutdown(sctp_assoc_t associd)
{
	/* NOTE(review): static buffer shared by all callers with no
	 * visible lock here — confirm callers are serialised. */
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	int ret;
	struct connection *con;

	/* Shutdown goes out via the listening (nodeid 0) connection. */
	con = nodeid2con(0,0);
	BUG_ON(con == NULL);

	outmessage.msg_name = NULL;
	outmessage.msg_namelen = 0;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	/* Build an SCTP_SNDRCV cmsg: a zero-length send carrying
	 * MSG_EOF requests a graceful shutdown of the association. */
	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outmessage.msg_controllen = cmsg->cmsg_len;
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));

	sinfo->sinfo_flags |= MSG_EOF;
	sinfo->sinfo_assoc_id = associd;

	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);

	if (ret != 0)
		log_print("send EOF to node failed: %d", ret);
}

576 577
static void sctp_init_failed_foreach(struct connection *con)
{
578 579 580 581 582 583 584 585 586 587 588 589 590 591 592

	/*
	 * Don't try to recover base con and handle race where the
	 * other node's assoc init creates a assoc and we get that
	 * notification, then we get a notification that our attempt
	 * failed due. This happens when we are still trying the primary
	 * address, but the other node has already tried secondary addrs
	 * and found one that worked.
	 */
	if (!con->nodeid || con->sctp_assoc)
		return;

	log_print("Retrying SCTP association init for node %d\n", con->nodeid);

	con->try_new_addr = true;
593
	con->sctp_assoc = 0;
594
	if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) {
595 596 597 598 599
		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
			queue_work(send_workqueue, &con->swork);
	}
}

600 601 602 603
/* INIT failed but we don't know which node...
   restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
	/* connections_lock protects connection_hash during the walk */
	mutex_lock(&connections_lock);

	foreach_conn(sctp_init_failed_foreach);

	mutex_unlock(&connections_lock);
}

M
Mike Christie 已提交
611 612 613 614 615 616 617 618 619 620 621
/* Resubmit a message whose SCTP send failed.  The destination nodeid
 * was stashed in sinfo_ppid by the send path; the failed payload
 * follows the sctp_send_failed header in 'buf'. */
static void retry_failed_sctp_send(struct connection *recv_con,
				   struct sctp_send_failed *sn_send_failed,
				   char *buf)
{
	int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed);
	struct dlm_mhandle *mh;
	struct connection *con;
	char *retry_buf;
	int nodeid = sn_send_failed->ssf_info.sinfo_ppid;

	log_print("Retry sending %d bytes to node id %d", len, nodeid);
	
	if (!nodeid) {
		log_print("Shouldn't resend data via listening connection.");
		return;
	}

	con = nodeid2con(nodeid, 0);
	if (!con) {
		log_print("Could not look up con for nodeid %d\n",
			  nodeid);
		return;
	}

	/* Copy the payload into a fresh writequeue buffer and commit it
	 * so the send worker will pick it up. */
	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf);
	if (!mh) {
		log_print("Could not allocate buf for retry.");
		return;
	}
	memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len);
	dlm_lowcomms_commit_buffer(mh);

	/*
	 * If we got a assoc changed event before the send failed event then
	 * we only need to retry the send.
	 */
	if (con->sctp_assoc) {
		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
			queue_work(send_workqueue, &con->swork);
	} else
		sctp_init_failed_foreach(con);
}

654
/* Something happened to an association */
static void process_sctp_notification(struct connection *con,
				      struct msghdr *msg, char *buf)
{
	union sctp_notification *sn = (union sctp_notification *)buf;
	struct linger linger;

	switch (sn->sn_header.sn_type) {
	case SCTP_SEND_FAILED:
		retry_failed_sctp_send(con, &sn->sn_send_failed, buf);
		break;
	case SCTP_ASSOC_CHANGE:
		switch (sn->sn_assoc_change.sac_state) {
		case SCTP_COMM_UP:
		case SCTP_RESTART:
		{
			/* Check that the new node is in the lockspace */
			struct sctp_prim prim;
			int nodeid;
			int prim_len, ret;
			int addr_len;
			struct connection *new_con;

			/*
			 * We get this before any data for an association.
			 * We verify that the node is in the cluster and
			 * then peel off a socket for it.
			 */
			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
				log_print("COMM_UP for invalid assoc ID %d",
					 (int)sn->sn_assoc_change.sac_assoc_id);
				sctp_init_failed();
				return;
			}
			memset(&prim, 0, sizeof(struct sctp_prim));
			prim_len = sizeof(struct sctp_prim);
			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;

			/* Ask the stack for the peer's primary address so we
			 * can map the association to a nodeid. */
			ret = kernel_getsockopt(con->sock,
						IPPROTO_SCTP,
						SCTP_PRIMARY_ADDR,
						(char*)&prim,
						&prim_len);
			if (ret < 0) {
				log_print("getsockopt/sctp_primary_addr on "
					  "new assoc %d failed : %d",
					  (int)sn->sn_assoc_change.sac_assoc_id,
					  ret);

				/* Retry INIT later */
				/* NOTE(review): this clears the bit on 'con'
				 * (the base connection), not 'new_con' that
				 * was just looked up — looks suspicious,
				 * confirm intent. */
				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
				if (new_con)
					clear_bit(CF_CONNECT_PENDING, &con->flags);
				return;
			}
			/* Normalise the address (zero port/tail) before the
			 * reverse lookup. */
			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
			if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
				unsigned char *b=(unsigned char *)&prim.ssp_addr;
				log_print("reject connect from unknown addr");
				print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
						     b, sizeof(struct sockaddr_storage));
				sctp_send_shutdown(prim.ssp_assoc_id);
				return;
			}

			new_con = nodeid2con(nodeid, GFP_NOFS);
			if (!new_con)
				return;

			if (new_con->sock) {
				log_print("reject connect from node %d: "
					  "already has a connection.",
					  nodeid);
				sctp_send_shutdown(prim.ssp_assoc_id);
				return;
			}

			/* Peel off a new sock */
			lock_sock(con->sock->sk);
			ret = sctp_do_peeloff(con->sock->sk,
				sn->sn_assoc_change.sac_assoc_id,
				&new_con->sock);
			release_sock(con->sock->sk);
			if (ret < 0) {
				log_print("Can't peel off a socket for "
					  "connection %d to node %d: err=%d",
					  (int)sn->sn_assoc_change.sac_assoc_id,
					  nodeid, ret);
				return;
			}
			add_sock(new_con->sock, new_con);

			/* Abortive close on release: don't linger. */
			linger.l_onoff = 1;
			linger.l_linger = 0;
			ret = kernel_setsockopt(new_con->sock, SOL_SOCKET, SO_LINGER,
						(char *)&linger, sizeof(linger));
			if (ret < 0)
				log_print("set socket option SO_LINGER failed");

			log_print("connecting to %d sctp association %d",
				 nodeid, (int)sn->sn_assoc_change.sac_assoc_id);

			new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id;
			new_con->try_new_addr = false;
			/* Send any pending writes */
			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
			clear_bit(CF_INIT_PENDING, &new_con->flags);
			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
				queue_work(send_workqueue, &new_con->swork);
			}
			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
				queue_work(recv_workqueue, &new_con->rwork);
		}
		break;

		case SCTP_COMM_LOST:
		case SCTP_SHUTDOWN_COMP:
		{
			/* Association is gone; forget the mapping so the
			 * next send re-inits. */
			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
			if (con) {
				con->sctp_assoc = 0;
			}
		}
		break;

		case SCTP_CANT_STR_ASSOC:
		{
			/* Will retry init when we get the send failed notification */
			log_print("Can't start SCTP association - retrying");
		}
		break;

		default:
			log_print("unexpected SCTP assoc change id=%d state=%d",
				  (int)sn->sn_assoc_change.sac_assoc_id,
				  sn->sn_assoc_change.sac_state);
		}
		/* NOTE(review): no break here — SCTP_ASSOC_CHANGE falls
		 * into the outer default, which is an empty statement,
		 * so this is harmless but worth confirming. */
	default:
		; /* fall through */
	}
}

796 797 798 799
/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;
	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	/* Lazily allocate the receive page; freed again when the ring
	 * drains completely (below). */
	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
	}

	/* Only SCTP needs these really */
	memset(&incmsg, 0, sizeof(incmsg));
	msg.msg_control = incmsg;
	msg.msg_controllen = sizeof(incmsg);

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;

	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
			       MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;

	/* Process SCTP notifications */
	if (msg.msg_flags & MSG_NOTIFICATION) {
		msg.msg_control = incmsg;
		msg.msg_controllen = sizeof(incmsg);

		process_sctp_notification(con, &msg,
				page_address(con->rx_page) + con->cb.base);
		mutex_unlock(&con->sock_mutex);
		return 0;
	}
	/* Ordinary data must never arrive on the base (nodeid 0) con. */
	BUG_ON(con->nodeid == 0);

	/* Filled everything we asked for: there is probably more
	 * waiting, so reschedule ourselves. */
	if (ret == len)
		call_again_soon = 1;
	cbuf_add(&con->cb, ret);
	/* Hand complete messages up to midcomms; returns bytes consumed
	 * or a negative error. */
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_CACHE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, "
			  "iov_len=%u, iov_base[0]=%p, read=%d",
			  page_address(con->rx_page), con->cb.base, con->cb.len,
			  len, iov[0].iov_base, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	/* Ring fully drained and nothing pending: release the page. */
	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}

/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	/* Refuse new connections once shutdown has begun. */
	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	memset(&peeraddr, 0, sizeof(peeraddr));
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &newsock);
	if (result < 0)
		return -ENOMEM;

	/* nesting level 0: listening con; newcon below uses level 1 */
	mutex_lock_nested(&con->sock_mutex, 0);

	result = -ENOTCONN;
	if (con->sock == NULL)
		goto accept_err;

	newsock->type = con->sock->type;
	newsock->ops = con->sock->ops;

	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
				  &len, 2)) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b=(unsigned char *)&peeraddr;
		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/*  Check to see if we already have a connection to this node. This
	 *  could happen if the two nodes initiate a connection at roughly
	 *  the same time and the connections cross on the wire.
	 *  In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			/* First crossing: allocate the secondary con. */
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		if (!othercon->sock) {
			newcon->othercon = othercon;
			othercon->sock = newsock;
			newsock->sk->sk_user_data = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
		}
		else {
			/* Both primary and secondary already in use. */
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	}
	else {
		newsock->sk->sk_user_data = newcon;
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

1046 1047 1048 1049 1050 1051
/* Free a write queue entry together with its backing page. */
static void free_entry(struct writequeue_entry *entry)
{
	__free_page(entry->page);
	kfree(entry);
}

M
Mike Christie 已提交
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069
/*
 * writequeue_entry_complete - account for sent bytes on a write queue entry
 * @e: write queue entry being updated
 * @completed: number of bytes just sent from this entry
 *
 * Advances the entry cursor past the completed bytes and frees the entry
 * once it is fully drained and no writer still holds a reference.
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	/* still partially unsent, or a writer is still filling it */
	if (e->len != 0 || e->users != 0)
		return;

	list_del(&e->list);
	free_entry(e);
}

1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_init_assoc(struct connection *con)
{
	struct sockaddr_storage rem_addr;
	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	struct connection *base_con;
	struct writequeue_entry *e;
	int len, offset;
	int ret;
	int addrlen;
	struct kvec iov[1];

	mutex_lock(&con->sock_mutex);
	/* Only one association setup may be in flight per connection;
	 * the bit is cleared again on send failure below. */
	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
		goto unlock;

	if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr,
			   con->try_new_addr)) {
		log_print("no address for nodeid %d", con->nodeid);
		goto unlock;
	}
	/* Connection 0 holds the shared SCTP listening socket; it must
	 * already exist by the time we try to start an association. */
	base_con = nodeid2con(0, 0);
	BUG_ON(base_con == NULL);

	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);

	outmessage.msg_name = &rem_addr;
	outmessage.msg_namelen = addrlen;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	spin_lock(&con->writequeue_lock);

	/* Nothing queued means nothing triggered this connect attempt;
	 * bail out rather than send an empty packet. */
	if (list_empty(&con->writequeue)) {
		spin_unlock(&con->writequeue_lock);
		log_print("writequeue empty for nodeid %d", con->nodeid);
		goto unlock;
	}

	e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
	len = e->len;
	offset = e->offset;

	/* Send the first block off the write queue */
	iov[0].iov_base = page_address(e->page)+offset;
	iov[0].iov_len = len;
	spin_unlock(&con->writequeue_lock);

	if (rem_addr.ss_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr;
		log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr;
		log_print("Trying to connect to %pI6", &sin6->sin6_addr);
	}

	/* Build the SCTP_SNDRCV ancillary data for the first packet. */
	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
	/* ppid carries our nodeid so the receiver can identify us;
	 * NOTE(review): stored little-endian here, apparently a DLM-local
	 * convention rather than network byte order — confirm peer agrees. */
	sinfo->sinfo_ppid = cpu_to_le32(con->nodeid);
	outmessage.msg_controllen = cmsg->cmsg_len;
	/* Force use of the address given in msg_name over the association's
	 * primary address. */
	sinfo->sinfo_flags |= SCTP_ADDR_OVER;

	/* Sending on the listening socket implicitly starts the association. */
	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
	if (ret < 0) {
		log_print("Send first packet to node %d failed: %d",
			  con->nodeid, ret);

		/* Try again later */
		clear_bit(CF_CONNECT_PENDING, &con->flags);
		clear_bit(CF_INIT_PENDING, &con->flags);
	}
	else {
		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
		spin_unlock(&con->writequeue_lock);
	}

unlock:
	mutex_unlock(&con->sock_mutex);
}

1163
/* Connect a new socket to its peer */
1164
static void tcp_connect_to_sock(struct connection *con)
1165
{
1166
	struct sockaddr_storage saddr, src_addr;
1167
	int addr_len;
1168
	struct socket *sock = NULL;
D
David Teigland 已提交
1169
	int one = 1;
1170
	int result;
1171 1172 1173

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
P
Patrick Caulfield 已提交
1174
		return;
1175 1176
	}

1177
	mutex_lock(&con->sock_mutex);
1178 1179 1180 1181
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
1182
	if (con->sock)
1183 1184 1185
		goto out;

	/* Create a socket to communicate with */
1186 1187
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
1188 1189 1190 1191
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
1192
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
1193 1194
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
P
Patrick Caulfield 已提交
1195
		goto out_err;
1196
	}
1197 1198 1199

	sock->sk->sk_user_data = con;
	con->rx_action = receive_from_sock;
1200 1201
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);
1202

1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
	/* Bind to our cluster-known address connecting to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

1214
	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
1215 1216

	log_print("connecting to %d", con->nodeid);
D
David Teigland 已提交
1217 1218 1219 1220 1221

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

1222
	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
P
Patrick Caulfield 已提交
1223
				   O_NONBLOCK);
1224 1225
	if (result == -EINPROGRESS)
		result = 0;
P
Patrick Caulfield 已提交
1226 1227
	if (result == 0)
		goto out;
1228

P
Patrick Caulfield 已提交
1229
out_err:
1230 1231 1232
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
1233 1234
	} else if (sock) {
		sock_release(sock);
1235 1236 1237 1238 1239
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
1240 1241 1242 1243 1244 1245 1246 1247 1248
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN && 
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
1249
		lowcomms_connect_sock(con);
1250
		return;
1251
	}
P
Patrick Caulfield 已提交
1252
out:
1253
	mutex_unlock(&con->sock_mutex);
P
Patrick Caulfield 已提交
1254
	return;
1255 1256
}

1257 1258
static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
1259
{
P
Patrick Caulfield 已提交
1260
	struct socket *sock = NULL;
1261 1262 1263 1264
	int result = 0;
	int one = 1;
	int addr_len;

1265
	if (dlm_local_addr[0]->ss_family == AF_INET)
1266 1267 1268 1269 1270
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
1271 1272
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
1273
	if (result < 0) {
D
David Teigland 已提交
1274
		log_print("Can't create listening comms socket");
1275 1276 1277
		goto create_out;
	}

D
David Teigland 已提交
1278 1279 1280 1281
	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

1282 1283 1284
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));

1285
	if (result < 0) {
D
David Teigland 已提交
1286
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
1287
	}
1288 1289
	con->rx_action = tcp_accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
1290 1291

	/* Bind to our port */
1292
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
1293 1294
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
D
David Teigland 已提交
1295
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
1296 1297 1298 1299 1300
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
1301
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
P
Patrick Caulfield 已提交
1302
				 (char *)&one, sizeof(one));
1303
	if (result < 0) {
D
David Teigland 已提交
1304
		log_print("Set keepalive failed: %d", result);
1305 1306 1307 1308
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
D
David Teigland 已提交
1309
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
1310 1311 1312 1313 1314
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

P
Patrick Caulfield 已提交
1315
create_out:
1316 1317 1318
	return sock;
}

1319 1320 1321 1322 1323 1324
/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

1325
	dlm_local_count = 0;
1326
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1327 1328 1329
		if (dlm_our_addr(&sas, i))
			break;

D
David Teigland 已提交
1330
		addr = kmalloc(sizeof(*addr), GFP_NOFS);
1331 1332 1333 1334 1335 1336 1337
		if (!addr)
			break;
		memcpy(addr, &sas, sizeof(*addr));
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

D
David Teigland 已提交
1338 1339 1340 1341 1342
/* Bind to an IP address. SCTP allows multiple address so it can do
   multi-homing */
static int add_sctp_bind_addr(struct connection *sctp_con,
			      struct sockaddr_storage *addr,
			      int addr_len, int num)
1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360
{
	int result = 0;

	if (num == 1)
		result = kernel_bind(sctp_con->sock,
				     (struct sockaddr *) addr,
				     addr_len);
	else
		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
					   SCTP_SOCKOPT_BINDX_ADD,
					   (char *)addr, addr_len);

	if (result < 0)
		log_print("Can't bind to port %d addr number %d",
			  dlm_config.ci_tcp_port, num);

	return result;
}
1361

1362 1363 1364 1365 1366 1367 1368
/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct sockaddr_storage localaddr;
	struct sctp_event_subscribe subscribe;
	int result = -EINVAL, num = 1, i, addr_len;
	/* Connection 0 is the shared listening connection for SCTP. */
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int bufsize = NEEDED_RMEM;
	int one = 1;

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	/* One SEQPACKET socket carries all associations. */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	/* Listen for events */
	memset(&subscribe, 0, sizeof(subscribe));
	subscribe.sctp_data_io_event = 1;
	subscribe.sctp_association_event = 1;
	subscribe.sctp_send_failure_event = 1;
	subscribe.sctp_shutdown_event = 1;
	subscribe.sctp_partial_delivery_event = 1;

	/* Force a large receive buffer; failure here is non-fatal. */
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
				 (char *)&bufsize, sizeof(bufsize));
	if (result)
		log_print("Error increasing buffer space on socket %d", result);

	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
				   (char *)&subscribe, sizeof(subscribe));
	if (result < 0) {
		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
			  result);
		goto create_delsock;
	}

	/* NODELAY failure is logged but tolerated. */
	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
				   sizeof(one));
	if (result < 0)
		log_print("Could not set SCTP NODELAY error %d\n", result);

	/* Init con struct */
	sock->sk->sk_user_data = con;
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = receive_from_sock;
	con->connect_action = sctp_init_assoc;

	/* Bind to all interfaces. */
	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);

		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
		if (result)
			goto create_delsock;
		++num;
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
1445 1446
{
	struct socket *sock = NULL;
D
David Teigland 已提交
1447
	struct connection *con = nodeid2con(0, GFP_NOFS);
1448 1449
	int result = -EINVAL;

1450 1451 1452
	if (!con)
		return -ENOMEM;

1453
	/* We don't support multi-homed hosts */
1454
	if (dlm_local_addr[1] != NULL) {
D
David Teigland 已提交
1455 1456
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
1457 1458 1459 1460 1461 1462
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499
	if (sock) {
		add_sock(sock, con);
		result = 0;
	}
	else {
		result = -EADDRINUSE;
	}

	return result;
}



/* Allocate a fresh, empty write queue entry for @con, with one backing
 * page.  Returns NULL if either allocation fails. */
static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *e;

	e = kmalloc(sizeof(*e), allocation);
	if (!e)
		return NULL;

	e->page = alloc_page(allocation);
	if (!e->page) {
		kfree(e);
		return NULL;
	}

	e->offset = 0;
	e->len = 0;
	e->end = 0;
	e->users = 0;
	e->con = con;

	return e;
}

D
David Teigland 已提交
1500
/* Reserve @len bytes of send buffer for @nodeid.  Tries to append to the
 * last entry on the connection's write queue; if it is absent or too
 * full, allocates a fresh entry.  On success *ppc points at the reserved
 * region and the returned handle is later passed to
 * dlm_lowcomms_commit_buffer().  Returns NULL on failure. */
void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_CACHE_SIZE - e->end < len)) {
		/* queue empty or tail entry lacks room */
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (!e) {
		e = new_writequeue_entry(con, allocation);
		if (!e)
			return NULL;
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
	}

	*ppc = page_address(e->page) + offset;
	return e;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

1547
	spin_lock(&con->writequeue_lock);
1548 1549 1550 1551 1552 1553
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	spin_unlock(&con->writequeue_lock);

1554 1555
	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
		queue_work(send_workqueue, &con->swork);
1556 1557 1558
	}
	return;

P
Patrick Caulfield 已提交
1559
out:
1560 1561 1562 1563 1564
	spin_unlock(&con->writequeue_lock);
	return;
}

/* Send a message.
 *
 * Drains the connection's write queue onto its socket from the send
 * workqueue.  Stops (and requeues implicitly via CF_WRITE_PENDING) when
 * the socket would block; reconnects on a hard send error; kicks off a
 * connect if there is no socket yet.
 */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;
	int count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		/* list head reached: queue fully drained */
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		/* a zero-length entry with no users should have been freed */
		BUG_ON(len == 0 && e->users == 0);
		/* drop the spinlock around the (possibly blocking) send */
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = kernel_sendpage(con->sock, e->page, offset, len,
					      msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				if (ret == -EAGAIN &&
				    test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) &&
				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
					/* Notify TCP that we're limited by the
					 * application window size.
					 */
					set_bit(SOCK_NOSPACE, &con->sock->flags);
					con->sock->sk->sk_write_pending++;
				}
				cond_resched();
				goto out;
			} else if (ret < 0)
				goto send_error;
		}

		/* Don't starve people filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	/* hard error: tear the socket down and schedule a reconnect */
	mutex_unlock(&con->sock_mutex);
	close_connection(con, false);
	lowcomms_connect_sock(con);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	/* no socket yet; don't race an in-progress SCTP association setup */
	if (!test_bit(CF_INIT_PENDING, &con->flags))
		lowcomms_connect_sock(con);
}

static void clean_one_writequeue(struct connection *con)
{
1637
	struct writequeue_entry *e, *safe;
1638 1639

	spin_lock(&con->writequeue_lock);
1640
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster.
   Cancels all pending work for the node's connection, discards its
   queued messages, closes the socket, and drops the cached address
   list so a rejoining node gets a fresh lookup.  Always returns 0. */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	struct dlm_node_addr *na;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		/* stop new work being queued, then flush what is running */
		clear_bit(CF_CONNECT_PENDING, &con->flags);
		clear_bit(CF_WRITE_PENDING, &con->flags);
		set_bit(CF_CLOSE, &con->flags);
		if (cancel_work_sync(&con->swork))
			log_print("canceled swork for node %d", nodeid);
		if (cancel_work_sync(&con->rwork))
			log_print("canceled rwork for node %d", nodeid);
		clean_one_writequeue(con);
		close_connection(con, true);
	}

	/* forget the node's addresses; freed newest-to-oldest */
	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);

	return 0;
}

1681
/* Receive workqueue function */
1682
static void process_recv_sockets(struct work_struct *work)
1683
{
1684 1685
	struct connection *con = container_of(work, struct connection, rwork);
	int err;
1686

1687 1688 1689 1690
	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
1691 1692
}

1693
/* Send workqueue function.
   Performs a pending connect first; after the connect action runs, the
   write-pending bit is set so any data queued while connecting is sent
   in the same pass. */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
		con->connect_action(con);
		set_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
		send_to_sock(con);
}


/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
1710
	foreach_conn(clean_one_writequeue);
1711 1712
}

1713
static void work_stop(void)
1714
{
1715 1716
	destroy_workqueue(recv_workqueue);
	destroy_workqueue(send_workqueue);
1717 1718
}

1719
static int work_start(void)
1720
{
1721 1722
	recv_workqueue = alloc_workqueue("dlm_recv",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1723 1724 1725
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
1726 1727
	}

1728 1729
	send_workqueue = alloc_workqueue("dlm_send",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1730 1731
	if (!send_workqueue) {
		log_print("can't start dlm_send");
1732
		destroy_workqueue(recv_workqueue);
1733
		return -ENOMEM;
1734 1735 1736 1737 1738
	}

	return 0;
}

1739
/* Quiesce one connection during shutdown: set the low CF_* flag bits to
 * block further socket activity and detach the socket's user data so
 * callbacks stop touching this connection.
 * NOTE(review): 0x0F sets flag bits 0-3 with a plain (non-atomic) OR —
 * presumably the pending/close bits; confirm against the CF_* enum and
 * that no concurrent set_bit/clear_bit can race here. */
static void stop_conn(struct connection *con)
{
	con->flags |= 0x0F;
	if (con->sock && con->sock->sk)
		con->sock->sk->sk_user_data = NULL;
}
1745

1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
/* Tear a connection down completely: close its socket, unhash it and
 * return it (and any companion "othercon") to the slab cache. */
static void free_conn(struct connection *con)
{
	struct connection *other = con->othercon;

	close_connection(con, true);
	if (other)
		kmem_cache_free(con_cache, other);
	hlist_del(&con->list);
	kmem_cache_free(con_cache, con);
}

/* Shut the lowcomms layer down.
   Phase 1 (under connections_lock): refuse new connections and quiesce
   every existing one.  Then stop the workqueues with the lock dropped,
   since running work items may need it.  Phase 2: with activity gone,
   discard queued messages and free all connections. */
void dlm_lowcomms_stop(void)
{
	/* Set all the flags to prevent any
	   socket activity.
	*/
	mutex_lock(&connections_lock);
	dlm_allow_conn = 0;
	foreach_conn(stop_conn);
	mutex_unlock(&connections_lock);

	work_stop();

	mutex_lock(&connections_lock);
	clean_writequeues();

	foreach_conn(free_conn);

	mutex_unlock(&connections_lock);
	kmem_cache_destroy(con_cache);
}

/* Bring the lowcomms layer up: resolve local addresses, create the
 * connection slab cache and workqueues, then start listening using the
 * configured protocol (0 = TCP, otherwise SCTP).  Returns 0 on success
 * or a negative errno with everything unwound on failure. */
int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto fail;

	error = work_start();
	if (error)
		goto fail_destroy;

	/* from here on, incoming connections may be accepted */
	dlm_allow_conn = 1;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	dlm_allow_conn = 0;
	/* the listen attempt may have created connection 0; undo it */
	con = nodeid2con(0,0);
	if (con) {
		close_connection(con, false);
		kmem_cache_free(con_cache, con);
	}
fail_destroy:
	kmem_cache_destroy(con_cache);
fail:
	return error;
}
1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840

/* Release every cached node address entry; called at module teardown. */
void dlm_lowcomms_exit(void)
{
	struct dlm_node_addr *na, *tmp;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry_safe(na, tmp, &dlm_node_addrs, list) {
		int i;

		list_del(&na->list);
		for (i = na->addr_count - 1; i >= 0; i--)
			kfree(na->addr[i]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);
}