/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . Currently on sn2, we have no way to determine which nasid an IRQ
 *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *	    followed by an IPI. The amo indicates where data is to be pulled
 *	    from, so after the IPI arrives, the remote partition checks the amo
 *	    word. The IPI can actually arrive before the amo however, so other
 *	    code must periodically check for this case. Also, remote amo
 *	    operations do not reliably time out. Thus we do a remote PIO read
 *	    solely to know whether the remote partition is down and whether we
 *	    should stop sending IPIs to it. This remote PIO read operation is
 *	    set up in a special nofault region so SAL knows to ignore (and
 *	    cleanup) any errors due to the remote amo write, PIO read, and/or
 *	    PIO write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static struct ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static struct ctl_table xpc_sys_xpc_dir[] = {
	{
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static struct ctl_table xpc_sys_dir[] = {
	{
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;
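
/*
 * For reference only (assuming the usual /proc/sys mount point), the tables
 * above register three tunables:
 *
 *	/proc/sys/xpc/hb/hb_interval		(1 .. 10 seconds)
 *	/proc/sys/xpc/hb/hb_check_interval	(10 .. 120 seconds)
 *	/proc/sys/xpc/disengage_timelimit	(0 .. 120 seconds)
 */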

/* non-zero if any remote partition disengage timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

struct xpc_arch_operations xpc_arch_ops;

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structure's function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_arch_ops.increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

static void
xpc_start_hb_beater(void)
{
	xpc_arch_ops.heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}

static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_arch_ops.heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active.  If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_arch_ops.get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_arch_ops.process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since he will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake him up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
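
/*
 * Note on xpc_kzalloc_cacheline_aligned() usage: the caller must hang on to
 * the *base cookie and later kfree(*base), not the aligned pointer that is
 * returned (see remote_openclose_args_base below). A rough sketch of the
 * expected pattern, with hypothetical names:
 *
 *	void *buf_base;
 *	void *buf = xpc_kzalloc_cacheline_aligned(size, GFP_KERNEL, &buf_base);
 *	...
 *	kfree(buf_base);
 */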

/*
 * Set up the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_arch_ops.setup_ch_structures(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Tear down the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_arch_ops.teardown_ch_structures(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}
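
/*
 * For reference, the setup_state transitions driven by the code above and by
 * xpc_setup_partitions() are:
 *
 *	XPC_P_SS_UNSET     -> XPC_P_SS_SETUP      (xpc_setup_ch_structures)
 *	XPC_P_SS_SETUP     -> XPC_P_SS_WTEARDOWN  (xpc_teardown_ch_structures)
 *	XPC_P_SS_WTEARDOWN -> XPC_P_SS_TORNDOWN   (xpc_teardown_ch_structures)
 */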

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which setup the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will teardown the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_arch_ops.allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_arch_ops.disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_arch_ops.request_partition_reactivation(part);
	}

	return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
		xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_timer);
		part->disengage_timer.function =
		    xpc_timeout_partition_disengage;
		part->disengage_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_arch_ops.setup_partitions();
}

static void
xpc_teardown_partitions(void)
{
	xpc_arch_ops.teardown_partitions();
	kfree(xpc_partitions);
}

static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_arch_ops.any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_arch_ops.any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/* Used to only allow one cpu to complete disconnect */
static unsigned int xpc_die_disconnecting;

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
		return;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_arch_ops.disallow_all_hbs();   /* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_arch_ops.partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_arch_ops.request_partition_deactivation(part);
			xpc_arch_ops.indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
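	/*
	 * (That is: N seconds * 1000000 us / 200 us per iteration =
	 * N * 1000 * 5 iterations.)
	 */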

	while (1) {
		any_engaged = xpc_arch_ops.any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_arch_ops.partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
1164 1165 1166 1167 1168 1169
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case, we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_arch_ops.offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_arch_ops.online_heartbeat();
		break;
	}
#else
	struct die_args *die_args = _die_args;

	switch (event) {
	case DIE_TRAP:
		if (die_args->trapnr == X86_TRAP_DF)
			xpc_die_deactivate();

		if (((die_args->trapnr == X86_TRAP_MF) ||
		     (die_args->trapnr == X86_TRAP_XF)) &&
		    !user_mode_vm(die_args->regs))
			xpc_die_deactivate();

		break;
	case DIE_INT3:
	case DIE_DEBUG:
		break;
	case DIE_OOPS:
	case DIE_GPF:
	default:
		xpc_die_deactivate();
	}
#endif

	return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Start up a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");