/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

24 25 26 27
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

28
#ifdef CONFIG_HOTPLUG_CPU
D
David Daney 已提交
29 30
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
31 32
#endif

33 34 35 36 37 38
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
39
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;
40 41 42 43 44 45

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
46 47
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();
48 49 50 51 52 53 54 55 56

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Send an IPI carrying @action to the mailbox of the core backing
 * @cpu.  The write to CIU_MBOX_SET raises the mailbox interrupt on
 * that core, which dispatches the action in mailbox_interrupt().
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);

	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

70 71
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
72 73 74
{
	unsigned int i;

75
	for_each_cpu_mask(i, *mask)
76 77 78 79
		octeon_send_ipi_single(i, action);
}

/**
 * Detect available CPUs, populate cpu_possible_mask
 *
 * Validates the bootloader's LABI block and, when it is present and
 * signed correctly, records the bootloader entry address used later to
 * restart offlined cores.  No-op unless CONFIG_HOTPLUG_CPU.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	/* Booted with maxcpus=0/nosmp: no hotplug bring-up possible. */
	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

100 101 102 103 104 105
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
106 107 108 109 110 111 112 113 114
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}
115 116 117 118

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

119
	/* The present CPUs get the lowest CPU numbers. */
120
	cpus = 1;
121
	for (id = 0; id < NR_CPUS; id++) {
122
		if ((id != coreid) && (core_mask & (1 << id))) {
123 124 125 126 127 128 129 130 131 132
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
R
Ralf Baechle 已提交
133 134
	 * The possible CPUs are all those present on the chip.	 We
	 * will assign CPU numbers for possible cores as well.	Cores
135 136
	 * are always consecutively numberd from 0.
	 */
137 138
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
139 140
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
141 142 143 144 145
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
146
#endif
147 148

	octeon_smp_hotplug_setup();
149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
}

/**
 * Firmware CPU startup hook
 *
 * Hands the new core its stack/global pointers through the
 * octeon_processor_* mailbox variables, then busy-waits (up to ~10 ms)
 * for the core's startup code to consume them, signalled by it
 * clearing octeon_processor_sp.
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	/* SP and GP must be visible before the boot word is written;
	 * the mb() below orders the stores against the polling core. */
	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
181
static void octeon_init_secondary(void)
182
{
D
David Daney 已提交
183
	unsigned int sr;
184

D
David Daney 已提交
185 186 187 188
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

189 190
	octeon_check_cpu_bist();
	octeon_init_cvmcount();
191 192

	octeon_irq_setup_secondary();
193 194 195 196 197 198 199 200
}

/**
 * Callout to firmware before smp_init
 *
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
201 202 203 204 205
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
206 207 208
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
209
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
210 211 212 213 214 215 216 217 218 219 220 221 222
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

/*
 * Take the current CPU out of service prior to offlining it.  Returns
 * -EBUSY for the boot CPU and -ENOTSUPP when the bootloader cannot
 * restart cores (no LABI entry address recorded).
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* The boot CPU can never be unplugged. */
	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);
	/* Migrate this core's IRQs to the remaining online CPUs. */
	octeon_fixup_irqs();

	flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
D
David Daney 已提交
254 255
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
256 257 258 259 260 261 262 263

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a bit complicated strategics of getting/settig available
	 * cores mask, copied from bootloader
	 */
D
David Daney 已提交
264 265

	mask = 1 << coreid;
266 267 268 269
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
D
David Daney 已提交
270
		struct linux_app_boot_info *labi;
271

D
David Daney 已提交
272
		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
273

D
David Daney 已提交
274 275 276 277 278 279 280
		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		       /* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
281 282
	}

D
David Daney 已提交
283 284
	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
285 286 287 288 289 290
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

void play_dead(void)
{
D
David Daney 已提交
291
	int cpu = cpu_number_map(cvmx_get_core_num());
292 293 294

	idle_task_exit();
	octeon_processor_boot = 0xff;
D
David Daney 已提交
295 296 297
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();
298 299 300 301 302 303 304 305 306

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

/*
 * Entry point installed in the bootloader's boot vector for a
 * restarted core: jump straight back into the kernel.
 */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

D
David Daney 已提交
310
static int octeon_update_boot_vector(unsigned int cpu)
311 312 313
{

	int coreid = cpu_logical_map(cpu);
D
David Daney 已提交
314 315
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
316
	struct boot_init_vector *boot_vect =
D
David Daney 已提交
317
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);
318 319 320 321

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
D
David Daney 已提交
322 323 324 325 326 327
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
328
	} else {		       /* alternative, already initialized */
D
David Daney 已提交
329 330
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
331 332 333 334 335 336 337 338 339 340
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume, that catched by simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
D
David Daney 已提交
341
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;
342

D
David Daney 已提交
343
	mb();
344 345 346 347 348 349

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

350
static int octeon_cpu_callback(struct notifier_block *nfb,
351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

369
static int register_cavium_notifier(void)
370
{
371
	hotcpu_notifier(octeon_cpu_callback, 0);
372 373 374 375
	return 0;
}
late_initcall(register_cavium_notifier);

R
Ralf Baechle 已提交
376
#endif	/* CONFIG_HOTPLUG_CPU */
377

378 379 380 381 382 383 384 385
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
386 387 388 389
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
390
};