/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
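/*
 * Late bringup, run on the new vcpu itself: do the generic cpu_init(),
 * enable the sysenter/syscall entry paths, record this CPU's data,
 * hook up its clockevents and finally mark it online.
 */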

static void __cpuinit cpu_bringup(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	set_cpu_online(cpu, true);
	percpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}
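/*
 * Bind the per-cpu IPI event channels (reschedule, call-function,
 * call-function-single) and the VIRQ_DEBUG virq for @cpu, remembering
 * the resulting irqs so they can be unbound again on failure or unplug.
 */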

static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);

	return rc;
}
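/*
 * A domU guest has no firmware CPU enumeration, so probe each vcpu id
 * with VCPUOP_is_up and mark every vcpu the hypervisor knows about as
 * possible.
 */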

static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
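/*
 * The initial domain boots with CPU maps derived from the host's ACPI
 * tables, which may list more CPUs than this domain has vcpus; rebuild
 * the possible/present maps from what the hypervisor actually provides.
 */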

static void __init xen_filter_cpu_maps(void)
{
	int i, rc;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
		}
	}
}
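/*
 * Boot-CPU setup: once the generic code has switched to the real
 * per-cpu GDT, make the boot GDT page writable again, trim the CPU
 * maps for dom0 and place the per-cpu vcpu_info structures.
 */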

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}
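/*
 * Prepare secondary CPUs: reject nosmp/noapic setups that Xen cannot
 * honour, allocate the topology cpumasks, wire up CPU0's IPIs, clamp
 * the possible map to max_cpus and mark the remaining CPUs present.
 */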

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
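/*
 * Build the initial vcpu_guest_context for a secondary vcpu: register
 * state pointing at cpu_bringup_and_idle, a read-only GDT frame, the
 * event/failsafe callbacks and the kernel pagetable, then load it into
 * the hypervisor with VCPUOP_initialise.
 */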

static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
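/*
 * Bring a secondary CPU online: hook up its idle task and per-cpu
 * state, set up its runstate area, timer and spinlock IPI, load the
 * initial context and kick the vcpu with VCPUOP_up, then wait for it
 * to mark itself CPU_ONLINE.
 */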

static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}
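/*
 * Tear down an offlined CPU from another CPU: wait for the hypervisor
 * to report the vcpu as down, then unbind its IPIs and release its
 * timer and spinlock state.
 */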

static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}
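/*
 * Runs on the dying CPU itself: take the vcpu down with VCPUOP_down.
 * The hypercall only returns once the vcpu is onlined again, at which
 * point cpu_bringup() reinitialises it.
 */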

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
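/*
 * Shutdown/reboot path, run on each remote CPU via smp_call_function():
 * switch to the kernel pagetable so nothing stays pinned, mark the CPU
 * offline and take its vcpu down.
 */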
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
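/*
 * Deliver @vector to every online CPU in @mask, one event channel
 * notification per target.
 */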

static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}
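/*
 * Cross-CPU function-call IPI: after raising the vector, yield to the
 * hypervisor if any target vcpu has been preempted, so it gets a
 * chance to run and acknowledge the call.
 */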

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}
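/*
 * PVHVM variants: CPU bringup and teardown are handled by the native
 * code, but the Xen IPI event channels still have to be bound and
 * unbound around it.
 */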

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
	xen_init_spinlocks();
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
{
	int rc;
	rc = native_cpu_up(cpu);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}