/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

/*
 * Boot handshake area shared with a starting secondary core: the boot
 * CPU publishes the target core id, stack pointer and global pointer
 * here (see octeon_boot_secondary()); the secondary is expected to
 * consume them and clear octeon_processor_sp once it has started.
 */
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
/* Bootloader re-entry address used to restart offlined cores. */
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
39
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;
40 41 42 43 44

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
45
		generic_smp_call_function_interrupt();
46 47
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();
48 49 50 51 52 53 54 55 56

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Send a mailbox IPI carrying @action to a single CPU by setting the
 * corresponding bits in the target core's CIU mailbox-set register.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	const int core = cpu_logical_map(cpu);

	cvmx_write_csr(CVMX_CIU_MBOX_SETX(core), action);
}

70 71
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
72 73 74
{
	unsigned int i;

75
	for_each_cpu(i, mask)
76 77 78 79
		octeon_send_ipi_single(i, action);
}

/**
 * Detect bootloader hot-plug support and record its re-entry address.
 *
 * Reads the linux_app_boot_info structure the bootloader leaves at a
 * fixed physical address; if its signature matches, remember the
 * bootloader entry point used later to restart offlined cores.  Does
 * nothing when SMP was disabled on the command line (nosmp/maxcpus=0).
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		/*
		 * Fix: the message previously lacked a trailing newline, so
		 * it would be merged with the next printk on the console.
		 */
		pr_info("The bootloader on this board does not support HOTPLUG_CPU\n");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

/**
 * Detect available CPUs, populate cpu_possible_mask.
 *
 * The boot core always becomes logical CPU 0; the other cores reported
 * in the bootloader's core mask get the next consecutive CPU numbers
 * and are marked present.  With HOTPLUG_CPU, cores that exist on the
 * chip but were not in the boot coremask are additionally marked
 * possible so they can be brought online later via the bootloader.
 */
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	/* Map the boot core to logical CPU 0 in both directions. */
	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		/* Cores already counted in the boot mask were handled above. */
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook: publish the new CPU's stack pointer,
 * global pointer and core id through the boot handshake variables,
 * then wait for the secondary core to pick them up.
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int timeout;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	/* The secondary clears octeon_processor_sp once it has the SP/GP. */
	for (timeout = 10000; octeon_processor_sp && timeout; timeout--)
		udelay(1);

	if (timeout == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	/*
	 * Point this core's exception base at the kernel's ebase.  BEV
	 * is set while switching so exceptions use the boot vectors
	 * during the change; the old status is then restored.
	 */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init
 *
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
203 204 205 206 207
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
208 209 210
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
211
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
212 213 214 215 216 217 218 219 220 221 222 223 224
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU; set to CPU_DEAD by play_dead(), read by octeon_cpu_die(). */
DEFINE_PER_CPU(int, cpu_state);

static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* The boot CPU can never be taken offline. */
	if (cpu == 0)
		return -EBUSY;

	/* Without bootloader support there is no way to restart the core. */
	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	/* Withdraw the CPU and reroute its interrupts before it dies. */
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_callin_map);
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	/* Wait for play_dead() on the target CPU to mark itself dead. */
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat complicated strategy for getting/setting the
	 * available cores mask, copied from the bootloader.
	 */
	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		/* Return the core to the bootloader's available coremask. */
		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		       /* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	/* Pulse the per-core reset line to park the core in the bootloader. */
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

/*
 * Idle-loop exit path for an offlined CPU: mark ourselves dead and spin
 * until octeon_cpu_die() on another CPU resets this core.
 */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	/* Reset the boot handshake sentinel (no core is booting). */
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

/*
 * First code run on a core restarted through the bootloader boot vector
 * (installed by octeon_update_boot_vector()): re-enter the kernel.
 */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

/*
 * Prepare an offlined core to come back up: mark it unavailable in the
 * bootloader's coremask, point its boot vector at start_after_reset(),
 * and kick it with an NMI so the bootloader restarts it.
 */
static int octeon_update_boot_vector(unsigned int cpu)
{

	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		/* Claim the core: clear its bit in the available mask. */
		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		       /* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume, that caught by simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	/* Make the boot vector visible before waking the core. */
	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

352
static int octeon_cpu_callback(struct notifier_block *nfb,
353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

/* Install the CPU hotplug notifier late in boot. */
static int register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */

/* Octeon implementation of the MIPS platform SMP operations. */
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};