tlb_uv.c 59.4 KB
Newer Older
1 2 3
/*
 *	SGI UltraViolet TLB flush routines.
 *
4
 *	(c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
5 6 7 8
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
9
#include <linux/seq_file.h>
10
#include <linux/proc_fs.h>
11
#include <linux/debugfs.h>
12
#include <linux/kernel.h>
13
#include <linux/slab.h>
14
#include <linux/delay.h>
15 16

#include <asm/mmu_context.h>
T
Tejun Heo 已提交
17
#include <asm/uv/uv.h>
18
#include <asm/uv/uv_mmrs.h>
19
#include <asm/uv/uv_hub.h>
20
#include <asm/uv/uv_bau.h>
I
Ingo Molnar 已提交
21
#include <asm/apic.h>
22
#include <asm/tsc.h>
23
#include <asm/irq_vectors.h>
24
#include <asm/timer.h>
25

26
static struct bau_operations ops __ro_after_init;
27

28 29 30 31 32 33 34 35 36 37 38
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
C
Cliff Wickman 已提交
39

40
static int timeout_us;
41
static bool nobau = true;
42
static int nobau_perm;
43

44
/* tunables: */
C
Cliff Wickman 已提交
45 46 47 48
static int max_concurr		= MAX_BAU_CONCURRENT;
static int max_concurr_const	= MAX_BAU_CONCURRENT;
static int plugged_delay	= PLUGGED_DELAY;
static int plugsb4reset		= PLUGSB4RESET;
49
static int giveup_limit		= GIVEUP_LIMIT;
C
Cliff Wickman 已提交
50 51 52 53 54
static int timeoutsb4reset	= TIMEOUTSB4RESET;
static int ipi_reset_limit	= IPI_RESET_LIMIT;
static int complete_threshold	= COMPLETE_THRESHOLD;
static int congested_respns_us	= CONGESTED_RESPONSE_US;
static int congested_reps	= CONGESTED_REPS;
55
static int disabled_period	= DISABLED_PERIOD;
C
Cliff Wickman 已提交
56 57

static struct tunables tunables[] = {
58 59 60 61 62 63 64 65 66 67
	{&max_concurr,           MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay,         PLUGGED_DELAY},
	{&plugsb4reset,          PLUGSB4RESET},
	{&timeoutsb4reset,       TIMEOUTSB4RESET},
	{&ipi_reset_limit,       IPI_RESET_LIMIT},
	{&complete_threshold,    COMPLETE_THRESHOLD},
	{&congested_respns_us,   CONGESTED_RESPONSE_US},
	{&congested_reps,        CONGESTED_REPS},
	{&disabled_period,       DISABLED_PERIOD},
	{&giveup_limit,          GIVEUP_LIMIT}
C
Cliff Wickman 已提交
68 69
};

70 71
static struct dentry *tunables_dir;
static struct dentry *tunables_file;
72

C
Cliff Wickman 已提交
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108
/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent:     number of shootdown messages sent",
	"stime:    time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus:  number of cpus targeted with shootdown",
	"dto:      number of destination timeouts",
	"retries:  destination timeout retries sent",
	"rok:   :  destination timeouts successfully retried",
	"resetp:   ipi-style resource resets for plugs",
	"resett:   ipi-style resource resets for timeouts",
	"giveup:   fall-backs to ipi-style shootdowns",
	"sto:      number of source timeouts",
	"bz:       number of stay-busy's",
	"throt:    number times spun in throttle",
	"swack:   image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv:     shootdown messages received",
	"rtime:    time spent processing messages",
	"all:      shootdown all-tlb messages",
	"one:      shootdown one-tlb messages",
	"mult:     interrupts that found multiple messages",
	"none:     interrupts that found no messages",
	"retry:    number of retry messages processed",
	"canc:     number messages canceled by retries",
	"nocan:    number retries that found nothing to cancel",
	"reset:    number of ipi-style reset requests processed",
	"rcan:     number messages canceled by reset requests",
	"disable:  number times use of the BAU was disabled",
	"enable:   number times use of the BAU was re-enabled"
};

109
static int __init setup_bau(char *arg)
110
{
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
	int result;

	if (!arg)
		return -EINVAL;

	result = strtobool(arg, &nobau);
	if (result)
		return result;

	/* we need to flip the logic here, so that bau=y sets nobau to false */
	nobau = !nobau;

	if (!nobau)
		pr_info("UV BAU Enabled\n");
	else
		pr_info("UV BAU Disabled\n");

128 129
	return 0;
}
130
early_param("bau", setup_bau);
131

132
/* base pnode in this partition */
C
Cliff Wickman 已提交
133
static int uv_base_pnode __read_mostly;
134

135 136
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
137 138
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

139 140 141 142 143 144 145 146 147 148
static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
149
	nobau = false;
150 151
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
152
		bcp->nobau = false;
153 154 155 156 157 158 159 160 161 162 163
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

164
	nobau = true;
165 166
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
167
		bcp->nobau = true;
168 169 170 171 172
	}
	pr_info("BAU turned off\n");
	return;
}

173
/*
174 175
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
176
 */
177
static int __init uvhub_to_first_node(int uvhub)
178 179 180 181 182
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
183
		if (uvhub == b)
184 185
			return node;
	}
186
	return -1;
187 188 189
}

/*
190
 * Determine the apicid of the first cpu on a uvhub.
191
 */
192
static int __init uvhub_to_first_apicid(int uvhub)
193 194 195 196
{
	int cpu;

	for_each_present_cpu(cpu)
197
		if (uvhub == uv_cpu_to_blade_id(cpu))
198 199 200 201
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

202 203 204 205 206 207 208 209
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
C
Cliff Wickman 已提交
210 211
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
212
{
213
	unsigned long dw;
C
Cliff Wickman 已提交
214
	struct bau_pq_entry *msg;
215

216
	msg = mdp->msg;
C
Cliff Wickman 已提交
217
	if (!msg->canceled && do_acknowledge) {
C
Cliff Wickman 已提交
218
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
219
		ops.write_l_sw_ack(dw);
220
	}
221
	msg->replied_to = 1;
C
Cliff Wickman 已提交
222
	msg->swack_vec = 0;
223 224 225
}

/*
226
 * Process the receipt of a RETRY message
227
 */
C
Cliff Wickman 已提交
228 229
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
230
{
231 232 233 234
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
C
Cliff Wickman 已提交
235 236 237
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;
238

239 240 241 242 243
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
C
Cliff Wickman 已提交
244 245
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
246 247 248
		if (msg2 == msg)
			break;

C
Cliff Wickman 已提交
249
		/* same conditions for cancellation as do_reset */
250
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
C
Cliff Wickman 已提交
251 252
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
253 254
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
255
			mmr = ops.read_l_sw_ack();
C
Cliff Wickman 已提交
256
			msg_res = msg2->swack_vec;
257 258 259 260 261 262
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
263
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
C
Cliff Wickman 已提交
264
				unsigned long mr;
265
				/*
C
Cliff Wickman 已提交
266 267
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
268 269 270 271
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
C
Cliff Wickman 已提交
272
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
273
				ops.write_l_sw_ack(mr);
274
			}
275 276 277 278 279
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
280

281 282 283 284
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
C
Cliff Wickman 已提交
285 286
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
287 288
{
	short socket_ack_count = 0;
C
Cliff Wickman 已提交
289 290 291 292
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
293
	struct bau_control *smaster = bcp->socket_master;
294

295 296 297
	/*
	 * This must be a normal message, or retry of a normal message
	 */
298 299
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
300
		stat->d_alltlb++;
301 302
	} else {
		__flush_tlb_one(msg->address);
303
		stat->d_onetlb++;
304
	}
305 306 307 308 309 310 311 312 313
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
C
Cliff Wickman 已提交
314
		bau_process_retry_msg(mdp, bcp);
315

316
	/*
C
Cliff Wickman 已提交
317
	 * This is a swack message, so we have to reply to it.
318 319 320 321
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
C
Cliff Wickman 已提交
322 323 324
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
325
	if (socket_ack_count == bcp->cpus_in_socket) {
C
Cliff Wickman 已提交
326
		int msg_ack_count;
327 328 329 330
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
331
		*sp = 0;
C
Cliff Wickman 已提交
332 333
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);
334 335 336 337

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
C
Cliff Wickman 已提交
338
			 * (unless we are in the UV2 workaround)
339
			 */
C
Cliff Wickman 已提交
340
			reply_to_message(mdp, bcp, do_acknowledge);
341 342
		}
	}
343

344
	return;
345 346 347
}

/*
C
cpw@sgi.com 已提交
348
 * Determine the first cpu on a pnode.
349
 */
C
cpw@sgi.com 已提交
350
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
351 352
{
	int cpu;
C
cpw@sgi.com 已提交
353 354 355 356 357
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
358
			return cpu;
C
cpw@sgi.com 已提交
359
	}
360 361 362 363 364 365 366
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
C
Cliff Wickman 已提交
367
 * can be identified by their nonzero swack_vec field.
368
 *
369 370
 * This is entered for a single cpu on the uvhub.
 * The sender want's this uvhub to free a specific message's
C
Cliff Wickman 已提交
371
 * swack resources.
372
 */
C
Cliff Wickman 已提交
373
static void do_reset(void *ptr)
374
{
375
	int i;
C
Cliff Wickman 已提交
376 377 378 379
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;
380

381 382 383
	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
C
Cliff Wickman 已提交
384
	 * will free its swack resource.
385 386 387
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
C
Cliff Wickman 已提交
388 389 390 391
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
392 393 394
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
C
Cliff Wickman 已提交
395
		    (msg->swack_vec) &&
396
		    (msg->msg_type != MSG_NOOP)) {
C
Cliff Wickman 已提交
397 398
			unsigned long mmr;
			unsigned long mr;
399 400 401 402 403 404 405
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
406
			mmr = ops.read_l_sw_ack();
C
Cliff Wickman 已提交
407 408
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
409 410
			if (mmr & msg_res) {
				stat->d_rcanceled++;
411
				ops.write_l_sw_ack(mr);
412 413 414
			}
		}
	}
415
	return;
416 417 418
}

/*
419 420
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
421
 */
C
cpw@sgi.com 已提交
422
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
423
{
C
cpw@sgi.com 已提交
424 425
	int pnode;
	int apnode;
C
Cliff Wickman 已提交
426
	int maskbits;
C
cpw@sgi.com 已提交
427
	int sender = bcp->cpu;
428
	cpumask_t *mask = bcp->uvhub_master->cpumask;
C
cpw@sgi.com 已提交
429
	struct bau_control *smaster = bcp->socket_master;
430
	struct reset_args reset_args;
431

432
	reset_args.sender = sender;
433
	cpumask_clear(mask);
434
	/* find a single cpu for each uvhub in this distribution mask */
C
cpw@sgi.com 已提交
435
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
C
cpw@sgi.com 已提交
436 437
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
C
Cliff Wickman 已提交
438
		int cpu;
C
cpw@sgi.com 已提交
439
		if (!bau_uvhub_isset(pnode, distribution))
440
			continue;
C
cpw@sgi.com 已提交
441 442
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
443
		cpumask_set_cpu(cpu, mask);
444
	}
C
Cliff Wickman 已提交
445 446

	/* IPI all cpus; preemption is already disabled */
447
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
448 449 450
	return;
}

451 452 453 454 455 456
/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration in
 * ns.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
457
{
458
	struct cyc2ns_data data;
459
	unsigned long long ns;
C
Cliff Wickman 已提交
460

461 462 463
	cyc2ns_read_begin(&data);
	ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
	cyc2ns_read_end();
464 465 466 467 468 469

	return ns;
}

/*
 * The reverse of the above; converts a duration in ns to a duration in cycles.
470
 */
471 472
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
473
	struct cyc2ns_data data;
474 475
	unsigned long long cyc;

476 477 478
	cyc2ns_read_begin(&data);
	cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
	cyc2ns_read_end();
479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495

	return cyc;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
	return ns_2_cycles(usec * NSEC_PER_USEC);
496 497
}

498
/*
499 500 501 502
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
C
Cliff Wickman 已提交
503
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
504
{
C
Cliff Wickman 已提交
505
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
506 507 508 509 510
}

/*
 * mark this quiet-requestor as done
 */
C
Cliff Wickman 已提交
511
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
512
{
C
Cliff Wickman 已提交
513 514 515 516 517 518 519 520 521 522 523
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
524 525 526 527 528
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
529
 */
530
static int uv1_wait_completion(struct bau_desc *bau_desc,
C
Cliff Wickman 已提交
531
				struct bau_control *bcp, long try)
532 533
{
	unsigned long descriptor_status;
C
Cliff Wickman 已提交
534
	cycles_t ttm;
535 536
	u64 mmr_offset = bcp->status_mmr;
	int right_shift = bcp->status_index;
537
	struct ptc_stats *stat = bcp->statp;
538

C
Cliff Wickman 已提交
539
	descriptor_status = uv1_read_status(mmr_offset, right_shift);
540
	/* spin on the status MMR, waiting for it to go idle */
C
Cliff Wickman 已提交
541
	while ((descriptor_status != DS_IDLE)) {
542
		/*
543 544 545 546
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
547
		 */
C
Cliff Wickman 已提交
548
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
549 550
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
C
Cliff Wickman 已提交
551
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
552
			stat->s_dtimeout++;
C
Cliff Wickman 已提交
553
			ttm = get_cycles();
554 555 556 557 558 559 560

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
C
Cliff Wickman 已提交
561
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
562 563 564 565 566 567 568 569 570 571 572 573
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
C
Cliff Wickman 已提交
574
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
575 576 577 578 579
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

C
Cliff Wickman 已提交
580
/*
581 582
 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
C
Cliff Wickman 已提交
583
 */
584
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
585
{
586
	return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
C
Cliff Wickman 已提交
587 588
}

C
Cliff Wickman 已提交
589 590 591 592 593
/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
594
static int handle_uv2_busy(struct bau_control *bcp)
C
Cliff Wickman 已提交
595 596 597 598
{
	struct ptc_stats *stat = bcp->statp;

	stat->s_uv2_wars++;
599 600
	bcp->busy = 1;
	return FLUSH_GIVEUP;
C
Cliff Wickman 已提交
601 602
}

603
static int uv2_3_wait_completion(struct bau_desc *bau_desc,
C
Cliff Wickman 已提交
604 605 606 607
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
608 609
	u64 mmr_offset = bcp->status_mmr;
	int right_shift = bcp->status_index;
610
	int desc = bcp->uvhub_cpu;
C
Cliff Wickman 已提交
611
	long busy_reps = 0;
612 613
	struct ptc_stats *stat = bcp->statp;

614
	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
C
Cliff Wickman 已提交
615

616
	/* spin on the status MMR, waiting for it to go idle */
C
Cliff Wickman 已提交
617
	while (descriptor_stat != UV2H_DESC_IDLE) {
618 619 620 621 622 623 624 625
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
626 627
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
C
Cliff Wickman 已提交
628
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				stat->s_plugged++;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */
				return FLUSH_GIVEUP;
			}
646 647
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
648 649
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
			return FLUSH_GIVEUP;
650
		} else {
C
Cliff Wickman 已提交
651 652 653 654 655
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
656
				if ((ttm - bcp->send_message) > bcp->timeout_interval)
C
Cliff Wickman 已提交
657 658
					return handle_uv2_busy(bcp);
			}
659
			/*
C
Cliff Wickman 已提交
660
			 * descriptor_stat is still BUSY
661 662
			 */
			cpu_relax();
663
		}
664
		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
665
	}
666
	bcp->conseccompletes++;
667 668 669
	return FLUSH_COMPLETE;
}

670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725
/*
 * Returns the status of current BAU message for cpu desc as a bit field
 * [Error][Busy][Aux]
 */
static u64 read_status(u64 status_mmr, int index, int desc)
{
	u64 stat;

	stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
	stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;

	return stat;
}

static int uv4_wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	struct ptc_stats *stat = bcp->statp;
	u64 descriptor_stat;
	u64 mmr = bcp->status_mmr;
	int index = bcp->status_index;
	int desc = bcp->uvhub_cpu;

	descriptor_stat = read_status(mmr, index, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		switch (descriptor_stat) {
		case UV2H_DESC_SOURCE_TIMEOUT:
			stat->s_stimeout++;
			return FLUSH_GIVEUP;

		case UV2H_DESC_DEST_TIMEOUT:
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;

		case UV2H_DESC_DEST_STRONG_NACK:
			stat->s_plugged++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_PLUGGED;

		case UV2H_DESC_DEST_PUT_ERR:
			bcp->conseccompletes = 0;
			return FLUSH_GIVEUP;

		default:
			/* descriptor_stat is still BUSY */
			cpu_relax();
		}
		descriptor_stat = read_status(mmr, index, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

726
/*
C
Cliff Wickman 已提交
727
 * Our retries are blocked by all destination sw ack resources being
728 729 730
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
C
Cliff Wickman 已提交
731 732
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
733 734 735 736
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;
C
Cliff Wickman 已提交
737

738 739
	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;
C
Cliff Wickman 已提交
740

741
		quiesce_local_uvhub(hmaster);
C
Cliff Wickman 已提交
742

743
		spin_lock(&hmaster->queue_lock);
C
cpw@sgi.com 已提交
744
		reset_with_ipi(&bau_desc->distribution, bcp);
745
		spin_unlock(&hmaster->queue_lock);
C
Cliff Wickman 已提交
746

747
		end_uvhub_quiesce(hmaster);
C
Cliff Wickman 已提交
748

749 750 751 752 753
		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

C
Cliff Wickman 已提交
754 755 756
static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
757
{
C
Cliff Wickman 已提交
758
	hmaster->max_concurr = 1;
759 760 761
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;
C
Cliff Wickman 已提交
762

763
		quiesce_local_uvhub(hmaster);
C
Cliff Wickman 已提交
764

765
		spin_lock(&hmaster->queue_lock);
C
cpw@sgi.com 已提交
766
		reset_with_ipi(&bau_desc->distribution, bcp);
767
		spin_unlock(&hmaster->queue_lock);
C
Cliff Wickman 已提交
768

769
		end_uvhub_quiesce(hmaster);
C
Cliff Wickman 已提交
770

771 772 773 774 775
		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

776
/*
777 778
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
779
 */
780
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
781
{
782 783 784 785 786 787 788 789
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;
	cycles_t tm1;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
790
		stat->s_bau_disabled++;
791
		tm1 = get_cycles();
792 793
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
794 795 796 797 798
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;
			}
799 800
		}
	}
801
	spin_unlock(&hmaster->disable_lock);
802 803
}

C
Cliff Wickman 已提交
804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830
static void count_max_concurr(int stat, struct bau_control *bcp,
				struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
831
			if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
832 833
			    (bcp->period_requests > bcp->cong_reps) &&
			    ((bcp->period_time / bcp->period_requests) >
834
					usec_2_cycles(bcp->cong_response_us))) {
835 836 837
				stat->s_congested++;
				disable_for_period(bcp, stat);
			}
C
Cliff Wickman 已提交
838 839 840 841 842 843
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
844
	else if (completion_status == FLUSH_GIVEUP) {
C
Cliff Wickman 已提交
845
		stat->s_giveup++;
846 847 848 849 850 851 852 853 854 855
		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}
C
Cliff Wickman 已提交
856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889
}

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
890
 * Send a broadcast and wait for it to complete.
891
 *
892
 * The flush_mask contains the cpus the broadcast is to be sent to including
893
 * cpus that are on the local uvhub.
894
 *
895 896 897
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
898
 */
899 900 901
static int uv_flush_send_and_wait(struct cpumask *flush_mask,
				  struct bau_control *bcp,
				  struct bau_desc *bau_desc)
902
{
903
	int seq_number = 0;
C
Cliff Wickman 已提交
904
	int completion_stat = 0;
905
	int uv1 = 0;
906
	long try = 0;
907
	unsigned long index;
908 909
	cycles_t time1;
	cycles_t time2;
910
	struct ptc_stats *stat = bcp->statp;
911
	struct bau_control *hmaster = bcp->uvhub_master;
912
	struct uv1_bau_msg_header *uv1_hdr = NULL;
913
	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
914

915
	if (bcp->uvhub_version == UV_BAU_V1) {
916
		uv1 = 1;
C
Cliff Wickman 已提交
917
		uv1_throttle(hmaster, stat);
918
	}
C
Cliff Wickman 已提交
919

920 921
	while (hmaster->uvhub_quiesce)
		cpu_relax();
922 923

	time1 = get_cycles();
924 925 926
	if (uv1)
		uv1_hdr = &bau_desc->header.uv1_hdr;
	else
927 928
		/* uv2 and uv3 */
		uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
929

930
	do {
931
		if (try == 0) {
932 933 934
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
935
				uv2_3_hdr->msg_type = MSG_REGULAR;
936 937
			seq_number = bcp->message_number++;
		} else {
938 939 940
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
941
				uv2_3_hdr->msg_type = MSG_RETRY;
942 943
			stat->s_retry_messages++;
		}
C
Cliff Wickman 已提交
944

945 946 947
		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
948
			uv2_3_hdr->sequence = seq_number;
949
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
950
		bcp->send_message = get_cycles();
C
Cliff Wickman 已提交
951 952 953

		write_mmr_activation(index);

954
		try++;
955
		completion_stat = ops.wait_completion(bau_desc, bcp, try);
C
Cliff Wickman 已提交
956 957

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
958

959
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
960
			bcp->ipi_attempts = 0;
961
			stat->s_overipilimit++;
C
Cliff Wickman 已提交
962
			completion_stat = FLUSH_GIVEUP;
963 964 965
			break;
		}
		cpu_relax();
C
Cliff Wickman 已提交
966 967 968
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

969
	time2 = get_cycles();
C
Cliff Wickman 已提交
970 971 972

	count_max_concurr(completion_stat, bcp, hmaster);

973 974
	while (hmaster->uvhub_quiesce)
		cpu_relax();
C
Cliff Wickman 已提交
975

976
	atomic_dec(&hmaster->active_descriptor_count);
C
Cliff Wickman 已提交
977 978 979 980

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
C
Cliff Wickman 已提交
981
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
C
Cliff Wickman 已提交
982 983 984 985 986
		return 1;
	return 0;
}

/*
987 988 989
 * The BAU is disabled for this uvhub. When the disabled time period has
 * expired re-enable it.
 * Return 0 if it is re-enabled for all cpus on this uvhub.
C
Cliff Wickman 已提交
990 991 992 993 994
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
995
	struct bau_control *hmaster;
C
Cliff Wickman 已提交
996

997 998 999 1000 1001 1002 1003
	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
C
Cliff Wickman 已提交
1004 1005 1006
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
1007
				tbcp->period_giveups = 0;
1008 1009
			}
		}
1010 1011
		spin_unlock(&hmaster->disable_lock);
		return 0;
C
Cliff Wickman 已提交
1012
	}
1013
	spin_unlock(&hmaster->disable_lock);
C
Cliff Wickman 已提交
1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
1030
	} else
C
Cliff Wickman 已提交
1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
1074
	}
C
Cliff Wickman 已提交
1075 1076
	if (!cnt)
		return 1;
1077
	return 0;
1078 1079
}

C
Cliff Wickman 已提交
1080 1081
/*
 * globally purge translation cache of a virtual address or all TLB's
T
Tejun Heo 已提交
1082
 * @cpumask: mask of all cpu's in which the address is to be removed
1083
 * @mm: mm_struct containing virtual address range
1084 1085
 * @start: start virtual address to be removed from TLB
 * @end: end virtual address to be remove from TLB
T
Tejun Heo 已提交
1086
 * @cpu: the current cpu
1087 1088 1089 1090 1091 1092
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
T
Tejun Heo 已提交
1093 1094
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
1095
 *
1096 1097
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
1098
 *
T
Tejun Heo 已提交
1099 1100 1101 1102 1103
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
1104
 */
T
Tejun Heo 已提交
1105
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1106
					  const struct flush_tlb_info *info)
1107
{
1108
	unsigned int cpu = smp_processor_id();
1109
	int locals = 0, remotes = 0, hubs = 0;
1110
	struct bau_desc *bau_desc;
1111 1112 1113
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
1114
	unsigned long descriptor_status, status, address;
T
Tejun Heo 已提交
1115

1116
	bcp = &per_cpu(bau_control, cpu);
1117 1118 1119

	if (bcp->nobau)
		return cpumask;
1120

1121 1122 1123
	stat = bcp->statp;
	stat->s_enters++;

1124 1125 1126 1127 1128 1129 1130 1131 1132 1133
	if (bcp->busy) {
		descriptor_status =
			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
		status = ((descriptor_status >> (bcp->uvhub_cpu *
			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
		if (status == UV2H_DESC_BUSY)
			return cpumask;
		bcp->busy = 0;
	}

1134 1135
	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
1136 1137
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;
C
Cliff Wickman 已提交
1138
			return cpumask;
1139
		}
1140
	}
1141

1142 1143
	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
1144 1145
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
1146 1147
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
1148
	/* don't actually do a shootdown of the local cpu */
1149
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
C
Cliff Wickman 已提交
1150

1151
	if (cpumask_test_cpu(cpu, cpumask))
1152
		stat->s_ntargself++;
1153

1154
	bau_desc = bcp->descriptor_base;
1155
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
1156
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
C
Cliff Wickman 已提交
1157
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
1158 1159
		return NULL;

C
Cliff Wickman 已提交
1160
	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
1161

1162 1163
	if (!info->end || (info->end - info->start) <= PAGE_SIZE)
		address = info->start;
1164
	else
1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180
		address = TLB_FLUSH_ALL;

	switch (bcp->uvhub_version) {
	case UV_BAU_V1:
	case UV_BAU_V2:
	case UV_BAU_V3:
		bau_desc->payload.uv1_2_3.address = address;
		bau_desc->payload.uv1_2_3.sending_cpu = cpu;
		break;
	case UV_BAU_V4:
		bau_desc->payload.uv4.address = address;
		bau_desc->payload.uv4.sending_cpu = cpu;
		bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
		break;
	}

1181
	/*
1182 1183
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
1184
	 */
1185
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
1186 1187 1188
		return NULL;
	else
		return cpumask;
1189 1190
}

C
Cliff Wickman 已提交
1191
/*
1192 1193
 * Search the message queue for any 'other' unprocessed message with the
 * same software acknowledge resource bit vector as the 'msg' message.
C
Cliff Wickman 已提交
1194
 */
1195 1196
static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
						  struct bau_control *bcp)
C
Cliff Wickman 已提交
1197 1198
{
	struct bau_pq_entry *msg_next = msg + 1;
1199
	unsigned char swack_vec = msg->swack_vec;
C
Cliff Wickman 已提交
1200 1201 1202

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
1203 1204 1205
	while (msg_next != msg) {
		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
				(msg_next->swack_vec == swack_vec))
C
Cliff Wickman 已提交
1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}

/*
 * UV2 needs to work around a bug in which an arriving message has not
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must be ignored.
 */
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long mmr_image;
	unsigned char swack_vec;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *other_msg;

1226
	mmr_image = ops.read_l_sw_ack();
C
Cliff Wickman 已提交
1227 1228 1229 1230 1231 1232 1233 1234 1235
	swack_vec = msg->swack_vec;

	if ((swack_vec & mmr_image) == 0) {
		/*
		 * This message was assigned a swack resource, but no
		 * reserved acknowlegment is pending.
		 * The bug has prevented this message from setting the MMR.
		 */
		/*
1236 1237
		 * Some message has set the MMR 'pending' bit; it might have
		 * been another message.  Look for that message.
C
Cliff Wickman 已提交
1238
		 */
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252
		other_msg = find_another_by_swack(msg, bcp);
		if (other_msg) {
			/*
			 * There is another. Process this one but do not
			 * ack it.
			 */
			bau_process_message(mdp, bcp, 0);
			/*
			 * Let the natural processing of that other message
			 * acknowledge it. Don't get the processing of sw_ack's
			 * out of order.
			 */
			return;
		}
C
Cliff Wickman 已提交
1253 1254 1255
	}

	/*
1256 1257
	 * Either the MMR shows this one pending a reply or there is no
	 * other message using this sw_ack, so it is safe to acknowledge it.
C
Cliff Wickman 已提交
1258 1259 1260 1261 1262 1263
	 */
	bau_process_message(mdp, bcp, 1);

	return;
}

1264 1265 1266 1267 1268 1269
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
1270
 * Interrupts are disabled; this interrupt could represent
1271 1272
 * the receipt of several messages.
 *
1273 1274
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
1275
 * (the resource will not be freed until noninterruptable cpus see this
1276
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
1277
 */
1278
void uv_bau_message_interrupt(struct pt_regs *regs)
1279 1280
{
	int count = 0;
1281
	cycles_t time_start;
C
Cliff Wickman 已提交
1282
	struct bau_pq_entry *msg;
1283 1284 1285 1286
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

1287
	ack_APIC_irq();
1288
	time_start = get_cycles();
C
Cliff Wickman 已提交
1289

1290
	bcp = &per_cpu(bau_control, smp_processor_id());
1291
	stat = bcp->statp;
C
Cliff Wickman 已提交
1292 1293 1294 1295

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

1296
	msg = bcp->bau_msg_head;
C
Cliff Wickman 已提交
1297
	while (msg->swack_vec) {
1298
		count++;
C
Cliff Wickman 已提交
1299 1300

		msgdesc.msg_slot = msg - msgdesc.queue_first;
1301
		msgdesc.msg = msg;
1302
		if (bcp->uvhub_version == UV_BAU_V2)
C
Cliff Wickman 已提交
1303 1304
			process_uv2_message(&msgdesc, bcp);
		else
1305
			/* no error workaround for uv1 or uv3 */
C
Cliff Wickman 已提交
1306
			bau_process_message(&msgdesc, bcp, 1);
C
Cliff Wickman 已提交
1307

1308
		msg++;
C
Cliff Wickman 已提交
1309 1310
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
1311
		bcp->bau_msg_head = msg;
1312
	}
1313
	stat->d_time += (get_cycles() - time_start);
1314
	if (!count)
1315
		stat->d_nomsg++;
1316
	else if (count > 1)
1317
		stat->d_multmsg++;
1318 1319
}

C
Cliff Wickman 已提交
1320
/*
C
Cliff Wickman 已提交
1321
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
C
Cliff Wickman 已提交
1322 1323 1324 1325
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
C
Cliff Wickman 已提交
1326
static void __init enable_timeouts(void)
1327
{
1328 1329
	int uvhub;
	int nuvhubs;
1330
	int pnode;
C
Cliff Wickman 已提交
1331
	unsigned long mmr_image;
1332

1333
	nuvhubs = uv_num_possible_blades();
1334

1335 1336
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
1337
			continue;
C
Cliff Wickman 已提交
1338

1339
		pnode = uv_blade_to_pnode(uvhub);
C
Cliff Wickman 已提交
1340
		mmr_image = read_mmr_misc_control(pnode);
C
Cliff Wickman 已提交
1341 1342 1343 1344 1345 1346
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
C
Cliff Wickman 已提交
1347 1348
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
C
Cliff Wickman 已提交
1349 1350 1351
		/*
		 * Set the 4-bit period.
		 */
C
Cliff Wickman 已提交
1352 1353 1354
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
C
Cliff Wickman 已提交
1355
		/*
1356
		 * UV1:
C
Cliff Wickman 已提交
1357 1358 1359 1360
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
C
Cliff Wickman 已提交
1361
		mmr_image |= (1L << SOFTACK_MSHIFT);
1362
		if (is_uv2_hub()) {
1363
			/* do not touch the legacy mode bit */
1364 1365
			/* hw bug workaround; do not use extended status */
			mmr_image &= ~(1L << UV2_EXT_SHFT);
1366 1367 1368
		} else if (is_uv3_hub()) {
			mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
			mmr_image |= (1L << SB_STATUS_SHFT);
1369
		}
C
Cliff Wickman 已提交
1370
		write_mmr_misc_control(pnode, mmr_image);
1371 1372 1373
	}
}

C
Cliff Wickman 已提交
1374
static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
1375 1376 1377 1378 1379 1380
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

C
Cliff Wickman 已提交
1381
static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
1382 1383 1384 1385 1386 1387 1388
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

C
Cliff Wickman 已提交
1389
static void ptc_seq_stop(struct seq_file *file, void *data)
1390 1391 1392 1393
{
}

/*
C
Cliff Wickman 已提交
1394
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
1395
 * 'data' points to the cpu number
C
Cliff Wickman 已提交
1396
 * Note: see the descriptions in stat_description[].
1397
 */
C
Cliff Wickman 已提交
1398
static int ptc_seq_show(struct seq_file *file, void *data)
1399 1400
{
	struct ptc_stats *stat;
1401
	struct bau_control *bcp;
1402 1403 1404 1405
	int cpu;

	cpu = *(loff_t *)data;
	if (!cpu) {
1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417
		seq_puts(file,
			 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
		seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_puts(file,
			 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
		seq_puts(file,
			 "rok resetp resett giveup sto bz throt disable ");
		seq_puts(file,
			 "enable wars warshw warwaits enters ipidis plugged ");
		seq_puts(file,
			 "ipiover glim cong swack recv rtime all one mult ");
		seq_puts(file, "none retry canc nocan reset rcan\n");
1418 1419
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1420
		bcp = &per_cpu(bau_control, cpu);
1421 1422 1423 1424
		if (bcp->nobau) {
			seq_printf(file, "cpu %d bau disabled\n", cpu);
			return 0;
		}
1425
		stat = bcp->statp;
1426 1427
		/* source side statistics */
		seq_printf(file,
1428
			"cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1429 1430
			   cpu, bcp->nobau, stat->s_requestor,
			   cycles_2_us(stat->s_time),
1431 1432 1433 1434
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
1435
		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
1436 1437
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1438
			   stat->s_dtimeout, stat->s_strongnacks);
1439
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1440 1441 1442
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
1443 1444 1445 1446 1447 1448 1449 1450
			   stat->s_busy, stat->s_throttles);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_bau_disabled, stat->s_bau_reenabled,
			   stat->s_uv2_wars, stat->s_uv2_wars_hw,
			   stat->s_uv2_war_waits, stat->s_enters,
			   stat->s_ipifordisabled, stat->s_plugged,
			   stat->s_overipilimit, stat->s_giveuplimit,
			   stat->s_congested);
1451

1452 1453
		/* destination side statistics */
		seq_printf(file,
1454
			"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
1455
			   ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
1456 1457 1458 1459 1460
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
1461 1462 1463 1464
	}
	return 0;
}

1465 1466 1467 1468
/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
C
Cliff Wickman 已提交
1469
				size_t count, loff_t *ppos)
1470
{
1471
	char *buf;
1472 1473
	int ret;

1474 1475 1476 1477
	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
		"max_concur plugged_delay plugsb4reset timeoutsb4reset",
		"ipi_reset_limit complete_threshold congested_response_us",
		"congested_reps disabled_period giveup_limit",
C
Cliff Wickman 已提交
1478
		max_concurr, plugged_delay, plugsb4reset,
1479
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
1480 1481
		congested_respns_us, congested_reps, disabled_period,
		giveup_limit);
1482

1483 1484 1485 1486 1487 1488
	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
1489 1490
}

1491
/*
C
Cliff Wickman 已提交
1492 1493
 * handle a write to /proc/sgi_uv/ptc_statistics
 * -1: reset the statistics
1494 1495
 *  0: display meaning of the statistics
 */
C
Cliff Wickman 已提交
1496 1497
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
1498
{
1499
	int cpu;
C
Cliff Wickman 已提交
1500 1501
	int i;
	int elements;
1502
	long input_arg;
1503
	char optstr[64];
1504
	struct ptc_stats *stat;
1505

1506
	if (count == 0 || count > sizeof(optstr))
1507
		return -EINVAL;
1508 1509 1510
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
C
Cliff Wickman 已提交
1511

1512 1513 1514 1515 1516 1517 1518 1519
	if (!strcmp(optstr, "on")) {
		set_bau_on();
		return count;
	} else if (!strcmp(optstr, "off")) {
		set_bau_off();
		return count;
	}

1520
	if (kstrtol(optstr, 10, &input_arg) < 0) {
1521
		pr_debug("%s is invalid\n", optstr);
1522 1523 1524
		return -EINVAL;
	}

1525
	if (input_arg == 0) {
1526
		elements = ARRAY_SIZE(stat_description);
1527 1528
		pr_debug("# cpu:      cpu number\n");
		pr_debug("Sender statistics:\n");
C
Cliff Wickman 已提交
1529
		for (i = 0; i < elements; i++)
1530
			pr_debug("%s\n", stat_description[i]);
1531 1532 1533 1534 1535
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551
	}

	return count;
}

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
1552
		}
1553
	}
1554 1555 1556
}

/*
C
Cliff Wickman 已提交
1557 1558
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * Zero values reset them to defaults.
1559
 */
C
Cliff Wickman 已提交
1560 1561
static int parse_tunables_write(struct bau_control *bcp, char *instr,
				int count)
1562 1563 1564
{
	char *p;
	char *q;
C
Cliff Wickman 已提交
1565 1566
	int cnt = 0;
	int val;
1567
	int e = ARRAY_SIZE(tunables);
1568 1569 1570 1571 1572 1573 1574 1575 1576

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
C
Cliff Wickman 已提交
1577
	if (cnt != e) {
1578
		pr_info("bau tunable error: should be %d values\n", e);
1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
C
Cliff Wickman 已提交
1590 1591
				max_concurr = MAX_BAU_CONCURRENT;
				max_concurr_const = MAX_BAU_CONCURRENT;
1592 1593 1594
				continue;
			}
			if (val < 1 || val > bcp->cpus_in_uvhub) {
1595
				pr_debug(
1596 1597 1598 1599
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
C
Cliff Wickman 已提交
1600 1601
			max_concurr = val;
			max_concurr_const = val;
1602
			continue;
C
Cliff Wickman 已提交
1603
		default:
1604
			if (val == 0)
C
Cliff Wickman 已提交
1605
				*tunables[cnt].tunp = tunables[cnt].deflt;
1606
			else
C
Cliff Wickman 已提交
1607
				*tunables[cnt].tunp = val;
1608 1609 1610 1611 1612
			continue;
		}
		if (q == p)
			break;
	}
C
Cliff Wickman 已提交
1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633
	return 0;
}

/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

1634 1635
	cpu = get_cpu();
	bcp = &per_cpu(bau_control, cpu);
C
Cliff Wickman 已提交
1636
	ret = parse_tunables_write(bcp, instr, count);
1637
	put_cpu();
C
Cliff Wickman 已提交
1638 1639 1640
	if (ret)
		return ret;

1641 1642
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653
		bcp->max_concurr         = max_concurr;
		bcp->max_concurr_const   = max_concurr;
		bcp->plugged_delay       = plugged_delay;
		bcp->plugsb4reset        = plugsb4reset;
		bcp->timeoutsb4reset     = timeoutsb4reset;
		bcp->ipi_reset_limit     = ipi_reset_limit;
		bcp->complete_threshold  = complete_threshold;
		bcp->cong_response_us    = congested_respns_us;
		bcp->cong_reps           = congested_reps;
		bcp->disabled_period     = sec_2_cycles(disabled_period);
		bcp->giveup_limit        = giveup_limit;
1654
	}
1655 1656 1657 1658
	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
C
Cliff Wickman 已提交
1659 1660 1661 1662
	.start		= ptc_seq_start,
	.next		= ptc_seq_next,
	.stop		= ptc_seq_stop,
	.show		= ptc_seq_show
1663 1664
};

C
Cliff Wickman 已提交
1665
static int ptc_proc_open(struct inode *inode, struct file *file)
1666 1667 1668 1669
{
	return seq_open(file, &uv_ptc_seq_ops);
}

1670 1671 1672 1673 1674
static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

1675
static const struct file_operations proc_uv_ptc_operations = {
C
Cliff Wickman 已提交
1676
	.open		= ptc_proc_open,
1677
	.read		= seq_read,
C
Cliff Wickman 已提交
1678
	.write		= ptc_proc_write,
1679 1680
	.llseek		= seq_lseek,
	.release	= seq_release,
1681 1682
};

1683 1684 1685 1686
static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
1687
	.llseek		= default_llseek,
1688 1689
};

1690
static int __init uv_ptc_init(void)
1691
{
1692
	struct proc_dir_entry *proc_uv_ptc;
1693 1694 1695 1696

	if (!is_uv_system())
		return 0;

1697 1698
	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
1699
	if (!proc_uv_ptc) {
1700
		pr_err("unable to create %s proc entry\n",
1701 1702 1703
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
1704 1705 1706

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
1707
		pr_err("unable to create debugfs directory %s\n",
1708 1709 1710 1711
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
C
Cliff Wickman 已提交
1712
					tunables_dir, NULL, &tunables_fops);
1713
	if (!tunables_file) {
1714
		pr_err("unable to create debugfs file %s\n",
1715 1716 1717
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
1718 1719 1720 1721
	return 0;
}

/*
 * Initialize the sending side's sending buffers.
 */
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	int uv1 = 0;
	unsigned long gpa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct uv1_bau_msg_header *uv1_hdr;
	struct uv2_3_bau_msg_header *uv2_3_hdr;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu, and one such set for each of the ADP_SZ cpus on the uvhub
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = ops.bau_gpa_to_offset(gpa);
	if (is_uv1_hub())
		uv1 = 1;

	/* the 14-bit pnode: tell the hub where the descriptors live */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		if (uv1) {
			uv1_hdr = &bd2->header.uv1_hdr;
			uv1_hdr->swack_flag = 1;
			/*
			 * The base_dest_nasid set in the message header
			 * is the nasid of the first uvhub in the partition.
			 * The bit map will indicate destination pnode numbers
			 * relative to that base. They may not be consecutive
			 * if nasid striding is being used.
			 */
			uv1_hdr->base_dest_nasid =
			                          UV_PNODE_TO_NASID(base_pnode);
			uv1_hdr->dest_subnodeid  = UV_LB_SUBNODEID;
			uv1_hdr->command         = UV_NET_ENDPOINT_INTD;
			uv1_hdr->int_both        = 1;
			/*
			 * all others need to be set to zero:
			 *   fairness chaining multilevel count replied_to
			 */
		} else {
			/*
			 * BIOS uses legacy mode, but uv2 and uv3 hardware always
			 * uses native mode for selective broadcasts.
			 */
			uv2_3_hdr = &bd2->header.uv2_3_hdr;
			uv2_3_hdr->swack_flag      = 1;
			uv2_3_hdr->base_dest_nasid =
			                          UV_PNODE_TO_NASID(base_pnode);
			uv2_3_hdr->dest_subnodeid  = UV_LB_SUBNODEID;
			uv2_3_hdr->command         = UV_NET_ENDPOINT_INTD;
		}
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}

/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long gnode, first, last, tail;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
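	/*
	 * The two statements above force 32-byte alignment of the payload
	 * queue: advance by up to 31 bytes, then clear the low five address
	 * bits.  The extra entry included in plsize covers the bytes that
	 * alignment may skip.
	 */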

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first	= pqp;
		bcp->bau_msg_head	= pqp;
		bcp->queue_last		= pqp + (DEST_Q_SIZE - 1);
	}

	first = ops.bau_gpa_to_offset(uv_gpa(pqp));
	last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));

	/*
	 * Pre UV4, the gnode is required to locate the payload queue
	 * and the payload queue tail must be maintained by the kernel.
	 */
	bcp = &per_cpu(bau_control, smp_processor_id());
	if (bcp->uvhub_version <= UV_BAU_V3) {
		tail = first;
		gnode = uv_gpa_to_gnode(uv_gpa(pqp));
		first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
		write_mmr_payload_tail(pnode, tail);
	}

	ops.write_payload_first(pnode, first);
	ops.write_payload_last(pnode, last);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}

/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
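
/*
 * Note on the final MMR write in init_uvhub(): the value packs the first
 * cpu's APIC ID into bits 63:32 and the BAU message vector into the low
 * bits, which is how the hub is told where to deliver its messaging
 * interrupt.
 */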

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		ts_ns = timeout_base_ns[index];
		ts_ns *= (mult1 * mult2);
		ret = ts_ns / 1000;
	} else {
		/* same destination timeout for uv2 and uv3 */
		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			base = 80;
		else
			base = 10;
		mult1 = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}
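
/*
 * Worked example for the uv1 branch (numbers are illustrative, not taken
 * from real hardware): if the BIOS-selected urgency7 index picks the
 * 10240 ns base period and mult1 * mult2 works out to 10, then
 * ts_ns = 10240 * 10 = 102400, and the function returns 102 us.
 */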

static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled		= 0;
		if (nobau)
			bcp->nobau		= true;
		bcp->statp			= &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval		= usec_2_cycles(2*timeout_us);
		bcp->max_concurr		= max_concurr;
		bcp->max_concurr_const		= max_concurr;
		bcp->plugged_delay		= plugged_delay;
		bcp->plugsb4reset		= plugsb4reset;
		bcp->timeoutsb4reset		= timeoutsb4reset;
		bcp->ipi_reset_limit		= ipi_reset_limit;
		bcp->complete_threshold		= complete_threshold;
		bcp->cong_response_us		= congested_respns_us;
		bcp->cong_reps			= congested_reps;
		bcp->disabled_period		= sec_2_cycles(disabled_period);
		bcp->giveup_limit		= giveup_limit;
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
		spin_lock_init(&bcp->disable_lock);
	}
}

/*
 * Scan all cpus to collect blade and socket summaries.
 */
static int __init get_cpu_topology(int base_pnode,
					struct uvhub_desc *uvhub_descs,
					unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			pr_emerg(
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			pr_emerg("%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}

/*
 * Each uvhub is to get a local cpumask.
 */
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}

/*
 * Initialize all the per_cpu information for the cpu's on a given socket,
 * given what has been gathered into the socket_desc struct.
 * And reports the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)
{
	int i, cpu, uvhub_cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		if (is_uv1_hub())
			bcp->uvhub_version = UV_BAU_V1;
		else if (is_uv2_hub())
			bcp->uvhub_version = UV_BAU_V2;
		else if (is_uv3_hub())
			bcp->uvhub_version = UV_BAU_V3;
		else if (is_uv4_hub())
			bcp->uvhub_version = UV_BAU_V4;
		else {
			pr_emerg("uvhub version not 1, 2, 3, or 4\n");
			return 1;
		}
		bcp->uvhub_master = *hmasterp;
		uvhub_cpu = uv_cpu_blade_processor_id(cpu);
		bcp->uvhub_cpu = uvhub_cpu;

		/*
		 * The ERROR and BUSY status registers are located pairwise over
		 * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
		 */
		if (uvhub_cpu < UV_CPUS_PER_AS) {
			bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
			bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
		} else {
			bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
			bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
						* UV_ACT_STATUS_SIZE;
		}

		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			pr_emerg("%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}

/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */
static int __init summarize_uvhub_sockets(int nuvhubs,
			struct uvhub_desc *uvhub_descs,
			unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
				make_per_cpu_thp(smaster);
			}
			socket++;
			socket_mask = (socket_mask >> 1);
		}
		make_per_hub_cpumask(hmaster);
	}
	return 0;
}

/*
 * initialize the bau_control structure for each cpu
 */
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	unsigned char *uvhub_mask;
	void *vp;
	struct uvhub_desc *uvhub_descs;

	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
		timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
}

static const struct bau_operations uv1_bau_ops __initconst = {
	.bau_gpa_to_offset       = uv_gpa_to_offset,
	.read_l_sw_ack           = read_mmr_sw_ack,
	.read_g_sw_ack           = read_gmmr_sw_ack,
	.write_l_sw_ack          = write_mmr_sw_ack,
	.write_g_sw_ack          = write_gmmr_sw_ack,
	.write_payload_first     = write_mmr_payload_first,
	.write_payload_last      = write_mmr_payload_last,
	.wait_completion	 = uv1_wait_completion,
};

static const struct bau_operations uv2_3_bau_ops __initconst = {
	.bau_gpa_to_offset       = uv_gpa_to_offset,
	.read_l_sw_ack           = read_mmr_sw_ack,
	.read_g_sw_ack           = read_gmmr_sw_ack,
	.write_l_sw_ack          = write_mmr_sw_ack,
	.write_g_sw_ack          = write_gmmr_sw_ack,
	.write_payload_first     = write_mmr_payload_first,
	.write_payload_last      = write_mmr_payload_last,
	.wait_completion	 = uv2_3_wait_completion,
};

static const struct bau_operations uv4_bau_ops __initconst = {
	.bau_gpa_to_offset       = uv_gpa_to_soc_phys_ram,
	.read_l_sw_ack           = read_mmr_proc_sw_ack,
	.read_g_sw_ack           = read_gmmr_proc_sw_ack,
	.write_l_sw_ack          = write_mmr_proc_sw_ack,
	.write_g_sw_ack          = write_gmmr_proc_sw_ack,
	.write_payload_first     = write_mmr_proc_payload_first,
	.write_payload_last      = write_mmr_proc_payload_last,
	.wait_completion         = uv4_wait_completion,
};
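
/*
 * uv_bau_init() below installs exactly one of the three ops tables above,
 * chosen by hub revision, so the rest of the driver can use the same MMR
 * accessors and completion-wait routine on every supported UV generation.
 */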

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	if (is_uv4_hub())
		ops = uv4_bau_ops;
	else if (is_uv3_hub())
		ops = uv2_3_bau_ops;
	else if (is_uv2_hub())
		ops = uv2_3_bau_ops;
	else if (is_uv1_hub())
		ops = uv1_bau_ops;

	nuvhubs = uv_num_possible_blades();
	if (nuvhubs < 2) {
		pr_crit("UV: BAU disabled - insufficient hub count\n");
		goto err_bau_disable;
	}

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}
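	/*
	 * The loop above leaves uv_base_pnode as the smallest pnode of any
	 * blade that has cpus; the activation descriptors describe their
	 * destinations relative to this base.
	 */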

	/* software timeouts are not supported on UV4 */
	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
		enable_timeouts();

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		pr_crit("UV: BAU disabled - per CPU init failed\n");
		goto err_bau_disable;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);
	}

	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			if (!is_uv1_hub())
				write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;

err_bau_disable:

	for_each_possible_cpu(cur_cpu)
		free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));

	set_bau_off();
	nobau_perm = 1;

	return -EINVAL;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);