/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
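
/*
 * A typical call, modeled on xpc_allocate_local_msgqueue() below: the caller
 * keeps the raw pointer returned through 'base' and later frees that pointer,
 * not the aligned one handed back.  For example:
 *
 *	void *base;
 *	void *buf;
 *
 *	buf = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &base);
 *	if (buf == NULL)
 *		return xpNoMemory;
 *	...
 *	kfree(base);
 */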

/*
 * Allocate the local message queue and the notify queue.
 */
static enum xp_retval
xpc_allocate_local_msgqueue(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
								   GFP_KERNEL,
						      &ch->local_msgqueue_base);
		if (ch->local_msgqueue == NULL)
			continue;

		nbytes = nentries * sizeof(struct xpc_notify);
		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
		if (ch->notify_queue == NULL) {
			kfree(ch->local_msgqueue_base);
			ch->local_msgqueue = NULL;
			continue;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->local_nentries, ch->partid, ch->number);

			ch->local_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}

/*
 * Allocate the cached remote message queue.
 */
static enum xp_retval
xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	DBUG_ON(ch->remote_nentries <= 0);

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
								    GFP_KERNEL,
						     &ch->remote_msgqueue_base);
		if (ch->remote_msgqueue == NULL)
			continue;

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->remote_nentries, ch->partid, ch->number);

			ch->remote_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
		"partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}

/*
 * Allocate message queues and other stuff associated with a channel.
 *
 * Note: Assumes all of the channel sizes are filled in.
 */
static enum xp_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ret = xpc_allocate_local_msgqueue(ch);
	if (ret != xpSuccess)
		return ret;

	ret = xpc_allocate_remote_msgqueue(ch);
	if (ret != xpSuccess) {
		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;
		return ret;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);
	ch->flags |= XPC_C_SETUP;
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_allocate_msgqueues(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;

		DBUG_ON(!(ch->flags & XPC_C_SETUP));
		DBUG_ON(ch->local_msgqueue == NULL);
		DBUG_ON(ch->remote_msgqueue == NULL);
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_IPI_send_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	DBUG_ON(ch->remote_msgqueue_pa == 0);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}

/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->w_remote_GP.get - 1;

	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
				     notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *)notify, get,
				ch->partid, ch->number);
		}
	}
}

/*
 * Free up message queues and other stuff that were allocated for the specified
 * channel.
 *
 * Note: ch->reason and ch->reason_line are left set for debugging purposes,
 * they're cleared when XPC_C_DISCONNECTED is cleared.
 */
static void
xpc_free_msgqueues(struct xpc_channel *ch)
{
	DBUG_ON(!spin_is_locked(&ch->lock));
	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	ch->remote_msgqueue_pa = 0;
	ch->func = NULL;
	ch->key = NULL;
	ch->msg_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	ch->local_GP->get = 0;
	ch->local_GP->put = 0;
	ch->remote_GP.get = 0;
	ch->remote_GP.put = 0;
	ch->w_local_GP.get = 0;
	ch->w_local_GP.put = 0;
	ch->w_remote_GP.get = 0;
	ch->w_remote_GP.put = 0;
	ch->next_msg_to_pull = 0;

	if (ch->flags & XPC_C_SETUP) {
		ch->flags &= ~XPC_C_SETUP;

		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
			ch->flags, ch->partid, ch->number);

		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->remote_msgqueue_base);
		ch->remote_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;
	}
}

/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(1UL << ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_IPI_send_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* >>> we do callout while holding ch->lock */
		xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	/* it's now safe to free the channel's message queues */
	xpc_free_msgqueues(ch);

	/* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_IPI_flags) {
		if (part->act_state != XPC_P_DEACTIVATING) {
			/* time to take action on any delayed IPI flags */
			spin_lock(&part->IPI_lock);
			XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
					  ch->delayed_IPI_flags);
			spin_unlock(&part->IPI_lock);
		}
		ch->delayed_IPI_flags = 0;
	}
}

/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
			  u8 IPI_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing IPI flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_IPI_flags |= IPI_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the IPI_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
			IPI_flags &= ~XPC_IPI_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
				if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
						       ch_number) &
				     XPC_IPI_OPENREQUEST)) {

					DBUG_ON(ch->delayed_IPI_flags != 0);
					spin_lock(&part->IPI_lock);
					XPC_SET_IPI_FLAGS(part->local_IPI_amo,
							  ch_number,
							  XPC_IPI_CLOSEREQUEST);
					spin_unlock(&part->IPI_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (IPI_flags & XPC_IPI_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
			" channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
			     & XPC_IPI_CLOSEREQUEST)) {

				DBUG_ON(ch->delayed_IPI_flags != 0);
				spin_lock(&part->IPI_lock);
				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
						  ch_number,
						  XPC_IPI_CLOSEREPLY);
				spin_unlock(&part->IPI_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (IPI_flags & XPC_IPI_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->msg_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      msg_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->msg_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->msg_size != ch->msg_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->msg_size = args->msg_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (IPI_flags & XPC_IPI_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
			"local_nentries=%d, remote_nentries=%d) received from "
			"partid=%d, channel=%d\n", args->local_msgqueue_pa,
			args->local_nentries, args->remote_nentries,
			ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		ch->remote_msgqueue_pa = args->local_msgqueue_pa;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->msg_size != ch->msg_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->msg_size = registration->msg_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_IPI_send_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}

/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
{
	struct xpc_msg *msg;
	s64 get;

	get = ch->w_remote_GP.get;
	do {
		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
					 (get % ch->local_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++get < ch->remote_GP.get);
}

/*
 * Clear some of the msg flags in the remote message queue.
 */
static inline void
xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
{
	struct xpc_msg *msg;
	s64 put;

	put = ch->w_remote_GP.put;
	do {
		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
					 (put % ch->remote_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++put < ch->remote_GP.put);
}
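
/*
 * Both message queues are circular buffers of fixed-size slots, so the slot
 * holding message number N sits at byte offset (N % nentries) * ch->msg_size
 * from the start of the queue.  For example, with local_nentries == 4 and
 * msg_size == 128, message number 9 reuses the slot at offset
 * (9 % 4) * 128 == 128; that reuse is safe because the helpers above clear
 * msg->flags in a slot once the GP values show its previous occupant has
 * been consumed.
 */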

static void
xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int nmsgs_sent;

	ch->remote_GP = part->remote_GPs[ch_number];

	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch->w_remote_GP.get == ch->remote_GP.get &&
	    ch->w_remote_GP.put == ch->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side. (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch->w_remote_GP.get != ch->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients. We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders(ch, xpMsgDelivered,
					   ch->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags(ch);

		ch->w_remote_GP.get = ch->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
			wake_up(&ch->msg_allocate_wq);
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch->w_remote_GP.put != ch->remote_GP.put) {
		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_msg().
		 */
		xpc_clear_remote_msgqueue_flags(ch);

		ch->w_remote_GP.put = ch->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
			ch->number);

		nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
		if (nmsgs_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				nmsgs_sent, ch->partid, ch->number);

			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
				xpc_activate_kthreads(ch, nmsgs_sent);
		}
	}

	xpc_msgqueue_deref(ch);
}

void
xpc_process_channel_activity(struct xpc_partition *part)
{
	unsigned long irq_flags;
	u64 IPI_amo, IPI_flags;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	IPI_amo = xpc_get_IPI_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related IPI flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);

		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
			xpc_process_openclose_IPI(part, ch_number, IPI_flags);

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related IPI flags, this may involve the
		 * activation of kthreads to deliver any pending messages sent
		 * from the other partition.
		 */

		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
			xpc_process_msg_IPI(part, ch_number);
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}

void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}
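
/*
 * Sketch of a registerer's channel callback; the argument list is inferred
 * from the ch->func() call sites in this file (the formal typedef is declared
 * outside this file).  xpConnected reports the number of local message queue
 * entries through the data argument, xpMsgReceived passes a pointer to the
 * delivered payload, which the registerer hands back via
 * xpc_initiate_received() when finished with it, and other reasons indicate
 * a disconnect:
 *
 *	static void
 *	example_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			     void *data, void *key)
 *	{
 *		if (reason == xpConnected) {
 *			u64 nentries = (u64)data;
 *			...
 *		} else if (reason == xpMsgReceived) {
 *			...	process the payload that data points to ...
 *			xpc_initiate_received(partid, ch_number, data);
 *		}
 *	}
 */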

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to teardown all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_IPI_send_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
static enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}

/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.
 */
static enum xp_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
		 struct xpc_msg **address_of_msg)
{
	struct xpc_msg *msg;
	enum xp_retval ret;
	s64 put;

	/* this reference will be dropped in xpc_send_msg() */
	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		xpc_msgqueue_deref(ch);
		return ch->reason;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return xpNotConnected;
	}

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpTimeout;

	while (1) {

		put = ch->w_local_GP.put;
		rmb();	/* guarantee that .put loads before .get */
		if (put - ch->w_remote_GP.get < ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}

		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not
		 * sending completion IPIs. This lets us fake an IPI
		 * that will cause the IPI handler to fetch the latest
		 * GP values as if an IPI was sent by the other side.
		 */
		if (ret == xpTimeout)
			xpc_IPI_send_local_msgrequest(ch);

		if (flags & XPC_NOWAIT) {
			xpc_msgqueue_deref(ch);
			return xpNoWait;
		}

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout) {
			xpc_msgqueue_deref(ch);
			return ret;
		}
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
				 (put % ch->local_nentries) * ch->msg_size);

	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *)msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;

	return xpSuccess;
}

/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel. NOTE that this routine can sleep waiting for a message
 * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel #.
 *	flags - see xpc.h for valid flags.
 *	payload - address of the allocated payload area pointer (filled in on
 * 	          return) in which the user-defined message is constructed.
 */
enum xp_retval
xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;
	struct xpc_msg *msg = NULL;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	*payload = NULL;

	if (xpc_part_ref(part)) {
		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
		xpc_part_deref(part);

		if (msg != NULL)
			*payload = &msg->payload;
	}

	return ret;
}
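
/*
 * A sender's path through this interface, sketched from the routines above
 * and below (error handling abbreviated; my_payload_t stands in for whatever
 * fixed-size payload the channel was registered with).  XPC_NOWAIT asks
 * xpc_allocate_msg() for an immediate xpNoWait return instead of sleeping
 * when the message queue is full:
 *
 *	my_payload_t *payload;
 *	enum xp_retval ret;
 *
 *	ret = xpc_initiate_allocate(partid, ch_number, XPC_NOWAIT,
 *				    (void **)&payload);
 *	if (ret != xpSuccess)
 *		return ret;
 *
 *	... construct the message in *payload ...
 *
 *	ret = xpc_initiate_send(partid, ch_number, payload);
 */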

/*
 * Now we actually send the messages that are ready to be sent by advancing
 * the local message queue's Put value and then send an IPI to the recipient
 * partition.
 */
static void
xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_msg *msg;
	s64 put = initial_put + 1;
	int send_IPI = 0;

	while (1) {

		while (1) {
			if (put == ch->w_local_GP.put)
				break;

			msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
						 (put % ch->local_nentries) *
						 ch->msg_size);

			if (!(msg->flags & XPC_M_READY))
				break;

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
		    initial_put) {
			/* someone else beat us to it */
			DBUG_ON(ch->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_IPI = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_IPI)
		xpc_IPI_send_msgrequest(ch);
}
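
/*
 * For example, if initial_put is 4 (message 4 was just marked XPC_M_READY by
 * the caller) and messages 5 and 6 are also ready but message 7 is not, the
 * scan stops at 7 and cmpxchg_rel() publishes local_GP->put = 7, announcing
 * messages 4 through 6 to the other side in a single update.
 */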

/*
 * Common code that does the actual sending of the message by advancing the
 * local message queue's Put value and sends an IPI to the partition the
 * message is being sent to.
 */
static enum xp_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
	     xpc_notify_func func, void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_notify *notify = notify;
	s64 put, msg_number = msg->number;

	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
	DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
		msg_number % ch->local_nentries);
	DBUG_ON(msg->flags & XPC_M_READY);

	if (ch->flags & XPC_C_DISCONNECTING) {
		/* drop the reference grabbed in xpc_allocate_msg() */
		xpc_msgqueue_deref(ch);
		return ch->reason;
	}

	if (notify_type != 0) {
		/*
		 * Tell the remote side to send an ACK interrupt when the
		 * message has been delivered.
		 */
		msg->flags |= XPC_M_INTERRUPT;

		atomic_inc(&ch->n_to_notify);

		notify = &ch->notify_queue[msg_number % ch->local_nentries];
		notify->func = func;
		notify->key = key;
		notify->type = notify_type;

		/* >>> is a mb() needed here? */

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
			 * An error occurred between our last error check and
			 * this one. We will try to clear the type field from
			 * the notify entry. If we succeed then
			 * xpc_disconnect_channel() didn't already process
			 * the notify entry.
			 */
			if (cmpxchg(&notify->type, notify_type, 0) ==
			    notify_type) {
				atomic_dec(&ch->n_to_notify);
				ret = ch->reason;
			}

			/* drop the reference grabbed in xpc_allocate_msg() */
			xpc_msgqueue_deref(ch);
			return ret;
		}
	}

	msg->flags |= XPC_M_READY;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of ch->local_GP->put.
	 */
	mb();

	/* see if the message is next in line to be sent, if so send it */

	put = ch->local_GP->put;
	if (put == msg_number)
		xpc_send_msgs(ch, put);

	/* drop the reference grabbed in xpc_allocate_msg() */
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Send a message previously allocated using xpc_initiate_allocate() on the
 * specified channel connected to the specified partition.
 *
 * This routine will not wait for the message to be received, nor will
 * notification be given when it does happen. Once this routine has returned
 * the message entry allocated via xpc_initiate_allocate() is no longer
 * accessible to the caller.
 *
 * This routine, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	enum xp_retval ret;

	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(msg == NULL);

	ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);

	return ret;
}

/*
 * Send a message previously allocated using xpc_initiate_allocate on the
 * specified channel connected to the specified partition.
 *
 * This routine will not wait for the message to be sent. Once this routine
 * has returned the message entry allocated via xpc_initiate_allocate() is no
 * longer accessible to the caller.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * This routine, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 *	func - function to call with asynchronous notification of message
 *		  receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, void *payload,
			 xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	enum xp_retval ret;

	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(msg == NULL);
	DBUG_ON(func == NULL);

	ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
			   func, key);
	return ret;
}
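
/*
 * Sketch of a notify callback for the routine above; the argument list is
 * inferred from the notify->func() call in xpc_notify_senders() (the formal
 * xpc_notify_func typedef is declared outside this file).  It is called with
 * xpMsgDelivered once the other side has received the message, or with the
 * channel's disconnect reason if the channel goes down first, and it must
 * not block:
 *
 *	static void
 *	example_notify_func(enum xp_retval reason, short partid, int ch_number,
 *			    void *key)
 *	{
 *		if (reason == xpMsgDelivered)
 *			...	e.g. release the buffer tracked by key ...
 *	}
 */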

/*
 * Deliver a message to its intended recipient.
 */
void
xpc_deliver_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg;

	msg = xpc_get_deliverable_msg(ch);
	if (msg != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *)msg, msg->number, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number,
				 &msg->payload, ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *)msg, msg->number, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send an IPI to the message sender's partition.
 */
static void
xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_msg *msg;
	s64 get = initial_get + 1;
	int send_IPI = 0;

	while (1) {

		while (1) {
			if (get == ch->w_local_GP.get)
				break;

			msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
						 (get % ch->remote_nentries) *
						 ch->msg_size);

			if (!(msg->flags & XPC_M_DONE))
				break;

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
		    initial_get) {
			/* someone else beat us to it */
			DBUG_ON(ch->local_GP->get <= initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_IPI = (msg_flags & XPC_M_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	if (send_IPI)
		xpc_IPI_send_msgrequest(ch);
}

/*
 * Acknowledge receipt of a delivered message.
 *
 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	s64 get, msg_number = msg->number;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *)msg, msg_number, ch->partid, ch->number);

	DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
		msg_number % ch->remote_nentries);
	DBUG_ON(msg->flags & XPC_M_DONE);

	msg->flags |= XPC_M_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of ch->local_GP->get.
	 */
	mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->local_GP->get;
	if (get == msg_number)
		xpc_acknowledge_msgs(ch, get, msg->flags);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg()  */
	xpc_msgqueue_deref(ch);
}