/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . Currently on sn2, we have no way to determine which nasid an IRQ
 *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *	    followed by an IPI. The amo indicates where data is to be pulled
 *	    from, so after the IPI arrives, the remote partition checks the amo
 *	    word. The IPI can actually arrive before the amo however, so other
 *	    code must periodically check for this case. Also, remote amo
 *	    operations do not reliably time out. Thus we do a remote PIO read
 *	    solely to know whether the remote partition is down and whether we
 *	    should stop sending IPIs to it. This remote PIO read operation is
 *	    set up in a special nofault region so SAL knows to ignore (and
 *	    clean up) any errors due to the remote amo write, PIO read, and/or
 *	    PIO write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 *
 */

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.bus_id = {0},		/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.bus_id = {0},		/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static ctl_table xpc_sys_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received */
atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
void *xpc_heartbeating_to_mask;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

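/*
 * The following function pointers provide the interface to the
 * architecture-specific (sn2 vs. uv) implementations; they are filled in
 * when xpc_init() calls xpc_init_sn2() or xpc_init_uv() below.
 */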
enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie,
						  u64 *paddr, size_t *len);
enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
void (*xpc_check_remote_hb) (void);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
enum xp_retval (*xpc_allocate_msgqueues) (struct xpc_channel *ch);
void (*xpc_free_msgqueues) (struct xpc_channel *ch);
void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);

void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
					  u64 remote_rp_pa, int nasid);
void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);

void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected);
enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
void (*xpc_teardown_infrastructure) (struct xpc_partition *part);

void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
int (*xpc_partition_engaged) (short partid);
int (*xpc_any_partition_engaged) (void);
void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
void (*xpc_assume_partition_disengaged) (short partid);

void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
				     unsigned long *irq_flags);
void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
				   unsigned long *irq_flags);
void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
				    unsigned long *irq_flags);
void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
				  unsigned long *irq_flags);

enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
				void *payload, u16 payload_size, u8 notify_type,
				xpc_notify_func func, void *key);
void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structure's function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

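/*
 * Start/stop the heartbeat: xpc_start_hb_beater() lets the arch-specific
 * code initialize its heartbeat state and then arms the timer that drives
 * xpc_hb_beater(); xpc_stop_hb_beater() undoes both steps.
 */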
static void
xpc_start_hb_beater(void)
{
	xpc_heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}

static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_heartbeat_exit();
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int last_IRQ_count = 0;
	int new_IRQ_count;
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			atomic_read(&xpc_activate_IRQ_rcvd) - last_IRQ_count);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * We need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.  That check
			 * must always reset xpc_hb_check_timeout.
			 */
			force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		new_IRQ_count = atomic_read(&xpc_activate_IRQ_rcvd);
		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
			force_IRQ = 0;

			dev_dbg(xpc_part, "found an IRQ to process; will be "
				"resetting xpc_hb_check_timeout\n");

			xpc_process_activate_IRQ_rcvd(new_IRQ_count -
						      last_IRQ_count);
			last_IRQ_count = new_IRQ_count;

			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (last_IRQ_count < atomic_read(
						&xpc_activate_IRQ_rcvd)
						|| time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since he will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake him up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_DEACTIVATING) {
		part->act_state = XPC_P_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
	part->act_state = XPC_P_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_allow_hb(partid);

	if (xpc_setup_infrastructure(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_infrastructure(part);
	}

	xpc_disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_request_partition_reactivation(part);
	}

	return 0;
}

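/*
 * Request that a kthread be created to activate the specified partition;
 * if the kthread can't be created the partition is returned to the
 * inactive state.
 */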
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_INACTIVE);

	part->act_state = XPC_P_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

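/*
 * Wake up the requested number of idle kthreads assigned to the specified
 * channel and, if that isn't enough, create additional ones (subject to
 * the channel's kthreads_assigned_limit) to handle the workload.
 */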
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (xpc_n_of_deliverable_msgs(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_msg(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(xpc_n_of_deliverable_msgs(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

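/*
 * Entry point for the kthreads created by xpc_create_kthreads(); makes the
 * connected callout if needed, delivers messages until the channel begins
 * disconnecting, and then makes the disconnecting callout on its way out.
 */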
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = xpc_n_of_deliverable_msgs(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
				xpc_indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

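/*
 * For the given channel number, wait on every partition until all callouts
 * to the channel's registerer have ceased, then process any channel control
 * flags whose handling was delayed by the disconnect.
 */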
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

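/*
 * Tear XPC down: stop the heartbeat checker and discovery threads, wait for
 * all partitions to deactivate, and undo everything done by xpc_init().
 * Called at module unload time and from the reboot notifier.
 */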
static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_any_partition_engaged());
	DBUG_ON(xpc_any_hbs_allowed() != 0);

	/* a zero timestamp indicates our rsvd page is not initialized */
	xpc_rsvd_page->ts_jiffies = 0;

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	kfree(xpc_partitions);

	if (is_shub())
		xpc_exit_sn2();
	else
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_partition_engaged(partid) ||
		    part->act_state != XPC_P_INACTIVE) {
			xpc_request_partition_deactivation(part);
			xpc_indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

	while (1) {
		any_engaged = xpc_any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_online_heartbeat();
		break;
	}
#else
	xpc_die_deactivate();
#endif

	return NOTIFY_DONE;
}

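/*
 * Module entry point: initialize the architecture-specific (sn2 or uv)
 * support, allocate the xpc_partitions[] array, publish our reserved page,
 * register the reboot and die notifiers, and start the heartbeat checker
 * and discovery threads.
 */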
int __init
xpc_init(void)
{
	int ret;
	short partid;
	struct xpc_partition *part;
	struct task_struct *kthread;

	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64)
			return -EINVAL;

		ret = xpc_init_sn2();
		if (ret != 0)
			return ret;

	} else if (is_uv()) {
		xpc_init_uv();

	} else {
		return -ENODEV;
	}

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_timer);
		part->disengage_timer.function =
		    xpc_timeout_partition_disengage;
		part->disengage_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	xpc_rsvd_page = xpc_setup_rsvd_page();
	if (xpc_rsvd_page == NULL) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		ret = -EBUSY;
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	/* a zero timestamp indicates our rsvd page is not initialized */
	xpc_rsvd_page->ts_jiffies = 0;

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
	kfree(xpc_partitions);
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");
module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");