/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . Currently on sn2, we have no way to determine which nasid an IRQ
 *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *	    followed by an IPI. The amo indicates where data is to be pulled
 *	    from, so after the IPI arrives, the remote partition checks the amo
 *	    word. The IPI can actually arrive before the amo however, so other
 *	    code must periodically check for this case. Also, remote amo
 *	    operations do not reliably time out. Thus we do a remote PIO read
 *	    solely to know whether the remote partition is down and whether we
 *	    should stop sending IPIs to it. This remote PIO read operation is
 *	    set up in a special nofault region so SAL knows to ignore (and
 *	    cleanup) any errors due to the remote amo write, PIO read, and/or
 *	    PIO write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 *
 */
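/*
 *	Thread structure (for orientation, summarizing the code below):
 *	xpc_init() starts the xpc_hb_checker and xpc_initiate_discovery
 *	kthreads. When a remote partition comes up, a per-partition
 *	"xpc%02d" kthread runs xpc_activating() and then acts as that
 *	partition's channel manager. Per-channel "xpc%02dc%d" kthreads,
 *	created by xpc_create_kthreads(), deliver payloads and make the
 *	registerer callouts.
 */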

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static ctl_table xpc_sys_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
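/*
 * Registered via register_sysctl_table() in xpc_init(), the tables above
 * show up as /proc/sys/xpc/hb/hb_interval, /proc/sys/xpc/hb/hb_check_interval
 * and /proc/sys/xpc/disengage_timelimit.
 */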
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
void *xpc_heartbeating_to_mask;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

int (*xpc_setup_partitions_sn) (void);
void (*xpc_teardown_partitions_sn) (void);
enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
						  unsigned long *rp_pa,
						  size_t *len);
int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);

void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
					  unsigned long remote_rp_pa,
					  int nasid);
void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);

void (*xpc_process_activate_IRQ_rcvd) (void);
enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);

void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
int (*xpc_partition_engaged) (short partid);
int (*xpc_any_partition_engaged) (void);
void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
void (*xpc_assume_partition_disengaged) (short partid);

void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
				     unsigned long *irq_flags);
void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
				   unsigned long *irq_flags);
void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
				    unsigned long *irq_flags);
void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
				  unsigned long *irq_flags);

enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
					       unsigned long msgqueue_pa);

enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
				    void *payload, u16 payload_size,
				    u8 notify_type, xpc_notify_func func,
				    void *key);
void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structure's function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

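/*
 * Start the heartbeat: initialize the heartbeat state and arm the timer
 * that periodically runs xpc_hb_beater().
 */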
static void
xpc_start_hb_beater(void)
{
	xpc_heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}

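/* stop the heartbeat: kill the timer and tear down the heartbeat state */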
static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active.  If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager which, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
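/*
 * Typical usage (a sketch): callers keep both pointers, using the aligned
 * one for access and the base one for freeing, e.g.
 *
 *	buf = xpc_kzalloc_cacheline_aligned(size, GFP_KERNEL, &buf_base);
 *	...
 *	kfree(buf_base);
 *
 * as is done with part->remote_openclose_args[_base] below.
 */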

/*
 * Set up the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_setup_ch_structures_sn(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Tear down the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_teardown_ch_structures_sn(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition, until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_request_partition_reactivation(part);
	}

	return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (xpc_n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(xpc_n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	/* partid/ch_number are unpacked again in xpc_kthread_start() */
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
				xpc_indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_timer);
		part->disengage_timer.function =
		    xpc_timeout_partition_disengage;
		part->disengage_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_setup_partitions_sn();
}

static void
xpc_teardown_partitions(void)
{
	xpc_teardown_partitions_sn();
	kfree(xpc_partitions);
}

static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_any_partition_engaged());
	DBUG_ON(xpc_any_hbs_allowed() != 0);

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_request_partition_deactivation(part);
			xpc_indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

	while (1) {
		any_engaged = xpc_any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_online_heartbeat();
		break;
	}
#else
	xpc_die_deactivate();
#endif

	return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

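/*
 * The tunables below can be set at load time, e.g. (a sketch):
 *	modprobe xpc xpc_hb_interval=5 xpc_hb_check_interval=20
 */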
module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");