/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . Currently on sn2, we have no way to determine which nasid an IRQ
 *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *	    followed by an IPI. The amo indicates where data is to be pulled
 *	    from, so after the IPI arrives, the remote partition checks the amo
 *	    word. The IPI can actually arrive before the amo however, so other
 *	    code must periodically check for this case. Also, remote amo
 *	    operations do not reliably time out. Thus we do a remote PIO read
 *	    solely to know whether the remote partition is down and whether we
 *	    should stop sending IPIs to it. This remote PIO read operation is
 *	    set up in a special nofault region so SAL knows to ignore (and
 *	    cleanup) any errors due to the remote amo write, PIO read, and/or
 *	    PIO write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 *
 */

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static ctl_table xpc_sys_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

int (*xpc_setup_partitions_sn) (void);
void (*xpc_teardown_partitions_sn) (void);
enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
						  unsigned long *rp_pa,
						  size_t *len);
int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);

void (*xpc_allow_hb) (short partid);
void (*xpc_disallow_hb) (short partid);
void (*xpc_disallow_all_hbs) (void);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);

void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
					  unsigned long remote_rp_pa,
					  int nasid);
void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);

void (*xpc_process_activate_IRQ_rcvd) (void);
enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);

void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
int (*xpc_partition_engaged) (short partid);
int (*xpc_any_partition_engaged) (void);
void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
void (*xpc_assume_partition_disengaged) (short partid);

void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
				     unsigned long *irq_flags);
void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
				   unsigned long *irq_flags);
void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
				    unsigned long *irq_flags);
void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
				  unsigned long *irq_flags);

enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
					       unsigned long msgqueue_pa);

enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
				    void *payload, u16 payload_size,
				    u8 notify_type, xpc_notify_func func,
				    void *key);
void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structures function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

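/* initialize the heartbeat support and get the heartbeat timer ticking */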
static void
xpc_start_hb_beater(void)
{
	xpc_heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}

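/* stop the heartbeat timer and shut down the heartbeat support */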
static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active.  If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}

/*
 * Set up the channel structures necessary to support XPartition
 * Communication between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_setup_ch_structures_sn(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Tear down the channel structures necessary to support XPartition
 * Communication between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_teardown_ch_structures_sn(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which setup the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will teardown the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_request_partition_reactivation(part);
	}

	return 0;
}

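/*
 * Request activation of a partition by creating the kthread that will run
 * xpc_activating(). If the kthread can't be created, the activation request
 * is canceled.
 */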
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

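/*
 * Ensure the specified #of kthreads are available to deliver the channel's
 * messages, first by waking any idle kthreads and then, if more are still
 * needed, by creating new ones (up to the channel's assigned limit).
 */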
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (xpc_n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(xpc_n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

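/*
 * Entry point for the kthreads assigned to a channel. Make the connected
 * callout if it hasn't been made yet, deliver messages until the channel is
 * disconnecting, then make the disconnecting callout and drop the references
 * taken when the kthread was created.
 */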
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
				xpc_indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

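/*
 * Wait until the given channel of every partition marked XPC_C_WDISCONNECT
 * has fully disconnected, then propagate any chctl flags that were delayed
 * during the disconnect and wake the channel manager if necessary.
 */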
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

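/*
 * Allocate the xpc_partitions[] array and initialize the fields needed
 * before any remote partition is activated, then let the hardware-specific
 * code finish the setup.
 */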
static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_timer);
		part->disengage_timer.function =
		    xpc_timeout_partition_disengage;
		part->disengage_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_setup_partitions_sn();
}

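/* undo what was done by xpc_setup_partitions() */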
static void
xpc_teardown_partitions(void)
{
	xpc_teardown_partitions_sn();
	kfree(xpc_partitions);
}

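/*
 * Notify the heartbeat checker and discovery threads to exit, wait for all
 * partitions to deactivate, and then tear down the rest of XPC's
 * infrastructure prior to module unload or system shutdown.
 */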
static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_request_partition_deactivation(part);
			xpc_indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

	while (1) {
		any_engaged = xpc_any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_online_heartbeat();
		break;
	}
#else
	xpc_die_deactivate();
#endif

	return NOTIFY_DONE;
}

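/*
 * Module initialization: set up the hardware-specific (sn2 or uv) support,
 * the partition structures, the reserved page, the reboot and die notifiers,
 * and the heartbeat checker and discovery kthreads.
 */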
int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");