/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

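/*
 * Runs on the new vCPU itself: finish per-CPU init, mark the CPU
 * online and only then enable interrupts.  Reached both at first boot
 * via cpu_bringup_and_idle() and again from xen_play_dead() when an
 * offlined vCPU is brought back up.
 */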
static void __cpuinit cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	this_cpu_write(cpu_state, CPU_ONLINE);

	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}

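/*
 * Bind this CPU's IPI event channels (reschedule, call-function,
 * call-function-single and, for PV, irq_work) plus the VIRQ_DEBUG virq
 * to their handlers, recording the returned IRQ numbers so they can be
 * unbound again in the failure path here and in xen_cpu_die().
 */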
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (xen_hvm_domain())
		return rc;

	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}

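/*
 * domU only: probe each vCPU id with VCPUOP_is_up (the hypercall fails
 * for ids the hypervisor did not give us) and mark the existing ones
 * as possible.  Dom0 is handled by xen_filter_cpu_maps() instead.
 */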
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

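/*
 * Dom0 counterpart: rebuild the possible/present maps from the set of
 * vCPUs the hypervisor actually provides and, with CPU hotplug
 * enabled, shrink nr_cpu_ids to match (see the comment below).
 */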
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * That is OK because with 'dom0_max_vcpus=X' we can only get up
	 * to X vCPUs, while nr_cpu_ids may be greater than X.  Normally
	 * this is not a problem, except when CPU hotplug is involved:
	 * the guest could then try to bring up more than X CPUs, which
	 * cannot work because there is no hypercall to raise the
	 * maximum number of vCPUs an already-running guest may have.
	 * So cap nr_cpu_ids at X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

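/*
 * Describe the secondary vCPU's initial state to the hypervisor: entry
 * point (cpu_bringup_and_idle), segments, GDT frame, kernel stack,
 * event/failsafe callbacks and CR3, then register it with
 * VCPUOP_initialise.  The GDT page is made read-only first, since Xen
 * will not accept a writable GDT frame from a PV guest.
 */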
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	{
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
		ctxt->event_callback_eip    =
					(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
					(unsigned long)xen_failsafe_callback;
	}
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

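/*
 * Bring up a secondary CPU: set up its per-CPU data, stack, timer,
 * spinlock and runstate state, load its start-of-day context into the
 * hypervisor, bind its IPI irqs, then kick it with VCPUOP_up and wait
 * for cpu_bringup() on the new CPU to report CPU_ONLINE.
 */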
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

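/*
 * Runs on a surviving CPU: wait (PV only) until the hypervisor reports
 * the dying vCPU as down, then release its IPI irqs, spinlock and
 * timer resources.
 */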
static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	if (!xen_hvm_domain())
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}

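/*
 * Runs on the CPU going offline: VCPUOP_down takes this vCPU offline
 * in the hypervisor.  If it is later brought back with VCPUOP_up, the
 * hypercall returns and we re-enter cpu_bringup().
 */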
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
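/*
 * stop_self() runs on each CPU being shut down: switch to
 * swapper_pg_dir so nothing of the old mm stays pinned, mark the CPU
 * offline and take the vCPU down.  xen_stop_other_cpus() fans this out
 * over smp_call_function().
 */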
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

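/*
 * Translate a native x86 IPI vector number into the Xen IPI used by
 * xen_send_IPI_one(); returns -1 and logs an error for vectors that
 * have no Xen equivalent.
 */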
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}

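/*
 * PVHVM: CPU bringup stays on the native path, but IPIs are routed
 * through the event-channel handlers above.  xen_hvm_smp_init() only
 * installs these ops when events can be delivered via the vector
 * callback (xen_have_vector_callback).
 */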
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;
	rc = native_cpu_up(cpu, tidle);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	xen_cpu_die(cpu);
	native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}