/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

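/* Entry for a newly started vcpu: finish per-cpu setup and mark it online. */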
static void __cpuinit cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	this_cpu_write(cpu_state, CPU_ONLINE);

	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}

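/*
 * Bind the per-cpu IPI event channels (resched, call-function,
 * call-function-single, irq-work) and the VIRQ_DEBUG handler for this cpu.
 * On failure, any channels that were already bound are torn down again.
 */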
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}

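/*
 * For domU guests, mark every vcpu the hypervisor knows about as possible.
 * Dom0 is handled later, in xen_filter_cpu_maps().
 */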
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
	 * have up to X, while nr_cpu_ids is greater than X. This
	 * normally is not a problem, except when CPU hotplugging
	 * is involved and then there might be more than X CPUs
	 * in the guest - which will not work as there is no
	 * hypercall to expand the max number of VCPUs an already
	 * running guest has. So cap it up to X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

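/*
 * Build the initial vcpu_guest_context for this cpu (registers, GDT frame,
 * callbacks, cr3) and hand it to the hypervisor with VCPUOP_initialise.
 */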
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	{
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
		ctxt->event_callback_eip    =
					(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
					(unsigned long)xen_failsafe_callback;
	}
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

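/*
 * Bring up secondary vcpu 'cpu': load its initial context, set up its
 * timer and IPIs, then kick it with VCPUOP_up and wait for CPU_ONLINE.
 */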
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

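/*
 * CPU hotplug: a dying vcpu is taken down with VCPUOP_down and is
 * restarted through cpu_bringup() when it is plugged back in.
 */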
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
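
/* Shut down the calling vcpu; used by xen_stop_other_cpus() below. */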
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
498
	for_each_cpu(cpu, mask) {
499
		if (xen_vcpu_stolen(cpu)) {
H
501 502 503 504 505
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

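/* Translate a native IPI vector number into the corresponding Xen IPI. */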
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();

	if (!(num_online_cpus() > 1))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_smp_send_call_function_single_ipi(cpu);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

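/* smp_ops used when running as a paravirtualized (PV) guest. */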
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}

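/*
 * HVM guests with vector callbacks use the native CPU bringup path but
 * still route IPIs through Xen event channels, so the Xen per-cpu
 * interrupts are (un)bound around the native cpu_up/cpu_die calls.
 */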
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;
	rc = native_cpu_up(cpu, tidle);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
	native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}