/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                         Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

#ifdef CONFIG_ICACHE_FLUSH_L1
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

#define BFIN_IPI_RESCHEDULE   0
#define BFIN_IPI_CALL_FUNC    1
#define BFIN_IPI_CPU_STOP     2

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;


struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t *waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	unsigned long type;
	struct smp_call_struct call_struct;
};

/* A magic number - stress testing shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5

/* Simple FIFO buffer, overflow leads to panic */
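/*
 * Slots head .. head + count - 1 (mod BFIN_IPI_MSGQ_LEN) hold pending
 * messages: the consumer advances head, the producer appends at slot
 * (head + count) % BFIN_IPI_MSGQ_LEN.
 */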
struct ipi_message_queue {
	spinlock_t lock;
	unsigned long count;
	unsigned long head; /* head of the queue */
	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

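	/* Park the core; SSYNC just drains the pipeline while we spin. */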
	while (1)
		SSYNC();
}

static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
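	/* The sender filled in smp_flush_data through its own D-cache;
	 * with no hardware coherence, a stale local copy must be
	 * discarded before the bounds are read. */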
	invalidate_dcache_range((unsigned long)fdata,
		(unsigned long)fdata + sizeof(*fdata));

	flush_icache_range(fdata->start, fdata->end);
}

static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;
	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
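		/* Signal completion: the sender spins until every target
		 * CPU has cleared its bit from the shared waitmask. */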
		cpu_clear(cpu, *msg->call_struct.waitmask);
	}
}

/* Use IRQ_SUPPLE_0 to request a reschedule.
 * When returning from the interrupt to user space,
 * there is a chance to reschedule. */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
	return IRQ_HANDLED;
}

static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	msg_queue = &__get_cpu_var(ipi_msg_queue);

	spin_lock_irqsave(&msg_queue->lock, flags);

	while (msg_queue->count) {
		msg = &msg_queue->ipi_message[msg_queue->head];
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			scheduler_ipi();
			break;
		case BFIN_IPI_CALL_FUNC:
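			/*
			 * Drop the queue lock across the callback so that
			 * func() can itself send IPIs or take other locks
			 * without deadlocking on this queue.
			 */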
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_call_function(cpu, msg);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_cpu_stop(cpu);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			break;
		}
		msg_queue->head++;
		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
		msg_queue->count--;
	}
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	return IRQ_HANDLED;
}

static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
		msg_queue->head = 0;
	}
}

static inline void smp_send_message(cpumask_t callmap, unsigned long type,
					void (*func) (void *info), void *info, int wait)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;
	unsigned long flags, next_msg;
	cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
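		/* Append at the tail slot, (head + count) % BFIN_IPI_MSGQ_LEN. */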
		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
			next_msg = (msg_queue->head + msg_queue->count)
					% BFIN_IPI_MSGQ_LEN;
			msg = &msg_queue->ipi_message[next_msg];
			msg->type = type;
			if (type == BFIN_IPI_CALL_FUNC) {
				msg->call_struct.func = func;
				msg->call_struct.info = info;
				msg->call_struct.wait = wait;
				msg->call_struct.waitmask = &waitmask;
			}
			msg_queue->count++;
		} else
			panic("IPI message queue overflow\n");
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
	}

	if (wait) {
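		/*
		 * Spin until every target has cleared its bit.  Each pass
		 * invalidates the cache line holding waitmask so that the
		 * remote cpu_clear() becomes visible without hardware
		 * cache coherence.
		 */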
		while (!cpus_empty(waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&waitmask),
				(unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
	}
}

int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t callmap;

	preempt_disable();
	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (!cpus_empty(callmap))
		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	preempt_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);

int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

void smp_send_reschedule(int cpu)
{
	/* Simply trigger an IPI; the actual reschedule happens on the
	 * interrupt return path (see ipi_handler_int0). */
	if (cpu_is_offline(cpu))
		return;
	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
}

void smp_send_stop(void)
{
	cpumask_t callmap;

	preempt_disable();
	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (!cpus_empty(callmap))
		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);

	preempt_enable();
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

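	/*
	 * fork_idle() allocates a fresh idle thread on every bring-up;
	 * free the one left over from a previous hotplug cycle first.
	 */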
	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}

static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

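	/* Mask all interrupts, then acknowledge anything latched while
	 * this core was held in reset (ILAT is write-one-to-clear). */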
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. The IARs have already been
	 * programmed by the boot CPU.  */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}

void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	bfin_setup_caches(cpu);

	/*
	 * Calibrate the loops-per-jiffy value.
	 * IRQs must be enabled here: the timer interrupt handler
	 * invalidates the D-cache, so core B can read an up-to-date
	 * jiffies value.
	 */
	calibrate_delay();

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

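	/* bogosum / (500000/HZ) is the integer part of the total BogoMIPS;
	 * the second expression recovers two decimal places. */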
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
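	/* Publish the bounds in a static area that the receiving cores
	 * invalidate and re-read in ipi_flush_icache(). */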
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif

#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	/* The timeout argument is in jiffies; wait up to five seconds. */
	return wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(5000));
}

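/*
 * Runs on the dying CPU itself: signal __cpu_die(), drop the init_mm
 * references taken in secondary_start_kernel(), then hand control to
 * the platform-specific shutdown hook.
 */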
void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif