/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . We currently have no way to determine which nasid an IPI came
 *	    from. Thus, xpc_IPI_send() does a remote AMO write followed by
 *	    an IPI. The AMO indicates where data is to be pulled from, so
 *	    after the IPI arrives, the remote partition checks the AMO word.
 *	    The IPI can actually arrive before the AMO, however, so other code
 *	    must periodically check for this case. Also, remote AMO operations
 *	    do not reliably time out. Thus we do a remote PIO read solely to
 *	    know whether the remote partition is down and whether we should
 *	    stop sending IPIs to it. This remote PIO read operation is set up
 *	    in a special nofault region so SAL knows to ignore (and clean up)
 *	    any errors due to the remote AMO write, PIO read, and/or PIO
 *	    write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 *
 */
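
/*
 * In rough pseudocode, the send path described above amounts to the
 * following (a sketch only; the real implementation lives in the
 * sn2-specific support code):
 *
 *	write the AMO word on the remote partition  (says what data to pull)
 *	send an IPI to the remote partition         (may arrive before the AMO)
 *	nofault PIO read of the remote partition    (detects a dead partition)
 */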

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.bus_id = {0},		/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.bus_id = {0},		/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
static int xpc_disengage_request_min_timelimit;	/* = 0 */
static int xpc_disengage_request_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "disengage_request_timelimit",
	 .data = &xpc_disengage_request_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_disengage_request_min_timelimit,
	 .extra2 = &xpc_disengage_request_max_timelimit},
	{}
};
static ctl_table xpc_sys_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage request was timed out */
int xpc_disengage_request_timedout;

/* #of IRQs received */
atomic_t xpc_act_IRQ_rcvd;

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
void *xpc_heartbeating_to_mask;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

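/*
 * XPC's interface to the architecture-specific (sn2 or uv) support code.
 * These function pointers are presumably filled in by xpc_init_sn2() or
 * xpc_init_uv() when xpc_init() runs.
 */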
enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
void (*xpc_check_remote_hb) (void);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
u64 (*xpc_get_IPI_flags) (struct xpc_partition *part);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);

void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp,
					   u64 remote_rp_pa, int nasid);

void (*xpc_process_act_IRQ_rcvd) (int n_IRQs_expected);
enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
void (*xpc_teardown_infrastructure) (struct xpc_partition *part);

void (*xpc_mark_partition_engaged) (struct xpc_partition *part);
void (*xpc_mark_partition_disengaged) (struct xpc_partition *part);
void (*xpc_request_partition_disengage) (struct xpc_partition *part);
void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *part);
u64 (*xpc_partition_engaged) (u64 partid_mask);
u64 (*xpc_partition_disengage_requested) (u64 partid_mask);
void (*xpc_clear_partition_engaged) (u64 partid_mask);
void (*xpc_clear_partition_disengage_request) (u64 partid_mask);

void (*xpc_IPI_send_local_activate) (int from_nasid);
void (*xpc_IPI_send_activated) (struct xpc_partition *part);
void (*xpc_IPI_send_local_reactivate) (int from_nasid);
void (*xpc_IPI_send_disengage) (struct xpc_partition *part);

void (*xpc_IPI_send_closerequest) (struct xpc_channel *ch,
				   unsigned long *irq_flags);
void (*xpc_IPI_send_closereply) (struct xpc_channel *ch,
				 unsigned long *irq_flags);
void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch,
				  unsigned long *irq_flags);
void (*xpc_IPI_send_openreply) (struct xpc_channel *ch,
				unsigned long *irq_flags);

enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
				void *payload, u16 payload_size, u8 notify_type,
				xpc_notify_func func, void *key);
void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);

/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_request_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_request_timeout != 0);
	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}

/*
 * Notify the heartbeat check thread that an IRQ has been received.
 */
static irqreturn_t
xpc_act_IRQ_handler(int irq, void *dev_id)
{
	atomic_inc(&xpc_act_IRQ_rcvd);
	wake_up_interruptible(&xpc_act_IRQ_wq);
	return IRQ_HANDLED;
}

/*
 * Timer to produce the heartbeat.  The timer structure's function field is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_act_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

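/*
 * Initialize the heartbeat facility and get the local heartbeat timer going.
 */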
static void
xpc_start_hb_beater(void)
{
	xpc_heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}

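/*
 * Stop the local heartbeat timer and shut the heartbeat facility down.
 */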
static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_heartbeat_exit();
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int last_IRQ_count = 0;
	int new_IRQ_count;
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * We need to periodically recheck to ensure no
			 * IPI/AMO pairs have been missed.  That check
			 * must always reset xpc_hb_check_timeout.
			 */
			force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
			force_IRQ = 0;

			dev_dbg(xpc_part, "found an IRQ to process; will be "
				"resetting xpc_hb_check_timeout\n");

			xpc_process_act_IRQ_rcvd(new_IRQ_count -
						 last_IRQ_count);
			last_IRQ_count = new_IRQ_count;

			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_act_IRQ_wq,
					       (last_IRQ_count <
						atomic_read(&xpc_act_IRQ_rcvd)
						|| time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB to call xpc_activating(). XPC hangs on to that kthread
 * until the partition is brought down, at which time the kthread returns
 * back to XPC HB. (The return of that kthread will signify to XPC HB that
 * XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, which, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_channel_activity(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will be
		 * servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->local_IPI_amo != 0 ||
				 (part->act_state == XPC_P_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which setup the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_DEACTIVATING) {
		part->act_state = XPC_P_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
	part->act_state = XPC_P_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_allow_hb(partid);

	if (xpc_setup_infrastructure(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_infrastructure(part);
	}

	xpc_disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating the partition */
		xpc_IPI_send_local_reactivate(part->reactivate_nasid);
	}

	return 0;
}

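/*
 * Request the activation of a partition by spawning a kthread that runs
 * xpc_activating() on its behalf.
 */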
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_INACTIVE);

	part->act_state = XPC_P_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

/*
 * Check to see if there is any channel activity to/from the specified
 * partition.
 */
static void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
	u64 IPI_amo;
	unsigned long irq_flags;

/* this needs to be uncommented, but I'm thinking this function and the */
/* ones that call it need to be moved into xpc_sn2.c... */
	IPI_amo = 0; /* = xpc_IPI_receive(part->local_IPI_amo_va); */
	if (IPI_amo == 0)
		return;

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	part->local_IPI_amo |= IPI_amo;
	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
		XPC_PARTID(part), IPI_amo);

	xpc_wakeup_channel_mgr(part);
}

/*
 * Handle the receipt of an SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}

/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		part->dropped_IPI_timer.expires = jiffies +
		    XPC_P_DROPPED_IPI_WAIT_INTERVAL;
		add_timer(&part->dropped_IPI_timer);
		xpc_part_deref(part);
	}
}

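/*
 * Wake up idle kthreads assigned to the channel and, if more are still
 * needed, create new ones (subject to the channel's assigned-kthread limit).
 */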
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (ch->w_local_GP.get < ch->w_remote_GP.put &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_msg(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(ch->w_local_GP.get < ch->w_remote_GP.put ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

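/*
 * Entry point for the kthreads created by xpc_create_kthreads(). Makes the
 * connected callout, delivers messages until the channel starts
 * disconnecting, and then makes the disconnecting callout before exiting.
 */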
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
			xpc_mark_partition_disengaged(part);
			xpc_IPI_send_disengage(part);
		}
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
			if (atomic_inc_return(&part->nchannels_engaged) == 1)
				xpc_mark_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_mark_partition_disengaged(part);
				xpc_IPI_send_disengage(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

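/*
 * Wait for the channel with the given channel number, on every partition,
 * to finish disconnecting, replaying any channel IPI flags that were
 * delayed while the disconnect was in progress.
 */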
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_IPI_flags) {
			if (part->act_state != XPC_P_DEACTIVATING) {
				spin_lock(&part->IPI_lock);
				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
						  ch->number,
						  ch->delayed_IPI_flags);
				spin_unlock(&part->IPI_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_IPI_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

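/*
 * Tear down XPC: deactivate all partitions, wait for them to disengage,
 * and release all remaining resources. Called on module unload and from
 * the reboot/die notifiers.
 */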
static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_request_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_act_IRQ_wq);

	/* ignore all incoming interrupts */
	free_irq(SGI_XPC_ACTIVATE, NULL);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_request_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_request_timeout >
			    disengage_request_timeout) {
				disengage_request_timeout =
				    part->disengage_request_timeout;
			}
		}

		if (xpc_partition_engaged(-1UL)) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to disengage, timeout in "
					 "%ld seconds\n",
					 (disengage_request_timeout - jiffies)
					 / HZ);
				printmsg_time = jiffies +
				    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to disengage\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_request_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "disengaged\n");
			}
			break;
		}

		/* sleep for 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_partition_engaged(-1UL));
	DBUG_ON(xpc_any_hbs_allowed() != 0);

	/* indicate to others that our reserved page is uninitialized */
	xpc_rsvd_page->stamp = 0;

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* close down protections for IPI operations */
	xpc_restrict_IPI_ops();

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	kfree(xpc_partitions);
	kfree(xpc_remote_copy_buffer_base);
}
/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/*
 * Notify other partitions to disengage from all references to our memory.
 */
static void
xpc_die_disengage(void)
{
	struct xpc_partition *part;
	short partid;
	unsigned long engaged;
	long time, printmsg_time, disengage_request_timeout;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
		    remote_vars_version)) {

			/* just in case it was left set by an earlier XPC */
			xpc_clear_partition_engaged(1UL << partid);
			continue;
		}

		if (xpc_partition_engaged(1UL << partid) ||
		    part->act_state != XPC_P_INACTIVE) {
			xpc_request_partition_disengage(part);
			xpc_mark_partition_disengaged(part);
			xpc_IPI_send_disengage(part);
		}
	}

	time = rtc_time();
	printmsg_time = time +
	    (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
	disengage_request_timeout = time +
	    (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);

	/* wait for all other partitions to disengage from us */

	while (1) {
		engaged = xpc_partition_engaged(-1UL);
		if (!engaged) {
			dev_info(xpc_part, "all partitions have disengaged\n");
			break;
		}

		time = rtc_time();
		if (time >= disengage_request_timeout) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (engaged & (1UL << partid)) {
					dev_info(xpc_part, "disengage from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (time >= printmsg_time) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "disengage, timeout in %ld seconds\n",
				 (disengage_request_timeout - time) /
				 sn_rtc_cycles_per_second);
			printmsg_time = time +
			    (XPC_DISENGAGE_PRINTMSG_INTERVAL *
			     sn_rtc_cycles_per_second);
		}
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater may be offlined
 * for a time. In this case we need to notify other partitions not to worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_disengage();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_online_heartbeat();
		break;
	}

	return NOTIFY_DONE;
}

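/*
 * Module initialization: select the architecture-specific (sn2 or uv)
 * support, allocate the partition structures, register the IRQ and
 * reboot/die notifier handlers, and start the heartbeat checker and
 * discovery kthreads.
 */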
int __init
xpc_init(void)
{
	int ret;
	short partid;
	struct xpc_partition *part;
	struct task_struct *kthread;
	size_t buf_size;

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote AMOs restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64)
			return -EINVAL;

		xpc_init_sn2();

	} else if (is_uv()) {
		xpc_init_uv();

	} else {
		return -ENODEV;
	}

	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

	buf_size = max(XPC_RP_VARS_SIZE,
		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
							       GFP_KERNEL,
						  &xpc_remote_copy_buffer_base);
	if (xpc_remote_copy_buffer == NULL) {
		dev_err(xpc_part, "can't get memory for remote copy buffer\n");
		return -ENOMEM;
	}

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->act_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_request_timer);
		part->disengage_request_timer.function =
		    xpc_timeout_partition_disengage_request;
		part->disengage_request_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Open up protections for IPI operations (and AMO operations on
	 * Shub 1.1 systems).
	 */
	xpc_allow_IPI_ops();

	/*
	 * Interrupts being processed will increment this atomic variable and
	 * awaken the heartbeat thread which will process the interrupts.
	 */
	atomic_set(&xpc_act_IRQ_rcvd, 0);

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue.  If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);
		ret = -EBUSY;
		goto out_2;
	}

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	xpc_rsvd_page = xpc_setup_rsvd_page();
	if (xpc_rsvd_page == NULL) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		ret = -EBUSY;
		goto out_3;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_4;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_4:
	/* indicate to others that our reserved page is uninitialized */
	xpc_rsvd_page->stamp = 0;

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_3:
	free_irq(SGI_XPC_ACTIVATE, NULL);
out_2:
	xpc_restrict_IPI_ops();
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
	kfree(xpc_partitions);
out_1:
	kfree(xpc_remote_copy_buffer_base);
	return ret;
}

module_init(xpc_init);

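/*
 * Module unload entry point; xpc_do_exit() does all of the real work.
 */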
void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
		 "for disengage request to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");