/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . Currently on sn2, we have no way to determine which nasid an IRQ
 *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *	    followed by an IPI. The amo indicates where data is to be pulled
 *	    from, so after the IPI arrives, the remote partition checks the amo
 *	    word. The IPI can actually arrive before the amo however, so other
 *	    code must periodically check for this case. Also, remote amo
 *	    operations do not reliably time out. Thus we do a remote PIO read
 *	    solely to know whether the remote partition is down and whether we
 *	    should stop sending IPIs to it. This remote PIO read operation is
 *	    set up in a special nofault region so SAL knows to ignore (and
 *	    cleanup) any errors due to the remote amo write, PIO read, and/or
 *	    PIO write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 *
 */
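
/*
 * A rough map of this file (derived from the code below): xpc_init()
 * registers the reboot and die notifiers and starts two kthreads --
 * xpc_hb_checker, which beats our heartbeat and monitors remote ones,
 * and xpc_initiate_discovery, a short-lived thread that discovers other
 * partitions. Each partition that comes up then gets an xpc_activating()
 * kthread, which becomes that partition's channel manager, while
 * xpc_kthread_start() instances deliver the channel payloads.
 */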

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static struct ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static struct ctl_table xpc_sys_xpc_dir[] = {
	{
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static struct ctl_table xpc_sys_dir[] = {
	{
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;
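
/*
 * A sketch of how these tables surface at run time (assuming the
 * register_sysctl_table() call in xpc_init() succeeds): the entries above
 * show up as /proc/sys/xpc/hb/hb_interval, /proc/sys/xpc/hb/hb_check_interval
 * and /proc/sys/xpc/disengage_timelimit, each clamped to its extra1/extra2
 * bounds by proc_dointvec_minmax(), e.g.:
 *
 *	echo 5 > /proc/sys/xpc/hb/hb_interval
 *	echo 60 > /proc/sys/xpc/disengage_timelimit
 */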

/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

struct xpc_arch_operations xpc_arch_ops;

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
	struct xpc_partition *part = from_timer(part, t, disengage_timer);

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structure's function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(struct timer_list *unused)
{
	xpc_arch_ops.increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

static void
xpc_start_hb_beater(void)
{
	xpc_arch_ops.heartbeat_init();
	timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
	xpc_hb_beater(0);
}

static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_arch_ops.heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active.  If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_arch_ops.get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_arch_ops.process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB to call xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
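
/*
 * Note on the helper above: over-allocating by L1_CACHE_BYTES guarantees
 * that an L1_CACHE_ALIGN()'d pointer into the second allocation still has
 * 'size' usable bytes behind it, while *base keeps the address actually
 * returned by kzalloc() so that callers can later kfree() it (as the
 * teardown paths below do with remote_openclose_args_base).
 */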

/*
 * Set up the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_arch_ops.setup_ch_structures(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Tear down the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_arch_ops.teardown_ch_structures(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which setup the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will teardown the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_arch_ops.allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_arch_ops.disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_arch_ops.request_partition_reactivation(part);
	}

	return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
		xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		timer_setup(&part->disengage_timer,
			    xpc_timeout_partition_disengage, 0);

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_arch_ops.setup_partitions();
}

static void
xpc_teardown_partitions(void)
{
	xpc_arch_ops.teardown_partitions();
	kfree(xpc_partitions);
}

static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_arch_ops.any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_arch_ops.any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/* Used to only allow one cpu to complete disconnect */
static unsigned int xpc_die_disconnecting;

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
		return;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_arch_ops.disallow_all_hbs();   /* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_arch_ops.partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_arch_ops.request_partition_deactivation(part);
			xpc_arch_ops.indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

	while (1) {
		any_engaged = xpc_arch_ops.any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_arch_ops.partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_arch_ops.offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_arch_ops.online_heartbeat();
		break;
	}
#else
	struct die_args *die_args = _die_args;

	switch (event) {
	case DIE_TRAP:
		if (die_args->trapnr == X86_TRAP_DF)
			xpc_die_deactivate();

		if (((die_args->trapnr == X86_TRAP_MF) ||
		     (die_args->trapnr == X86_TRAP_XF)) &&
		    !user_mode(die_args->regs))
			xpc_die_deactivate();

		break;
	case DIE_INT3:
	case DIE_DEBUG:
		break;
	case DIE_OOPS:
	case DIE_GPF:
	default:
		xpc_die_deactivate();
	}
#endif

	return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Start up a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");
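
/*
 * Load-time overrides (a sketch, not taken from this file): the parameters
 * below are exposed under the variable names given to module_param(), so
 * something like the following would adjust them when loading the module:
 *
 *	modprobe xpc xpc_hb_interval=2 xpc_hb_check_interval=20 \
 *		xpc_disengage_timelimit=90 xpc_kdebug_ignore=1
 */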

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");