/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>


/*
 * switch_to(prev, next, last) should switch tasks from prev to next,
 * storing in last the task we actually switched away from.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
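 *
 * Note that, although it takes only "prev", the macro below also relies
 * on "next" being in scope at its expansion site in switch_to(), where
 * it clears next->thread.emulated_fp.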
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	     (!(KSTK_STATUS(prev) & ST0_CU1))) {			\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while(0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define switch_to(prev,next,last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(task_thread_info(current)->tp_value);\
} while(0)
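
/*
 * Illustrative sketch (not part of the original header): switch_to() is
 * intended to be invoked only from the scheduler's context-switch path,
 * with "prev" and "next" pointing to task_structs.  Roughly:
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *
 * When the "call" returns, execution has resumed in next's context and
 * "last" names the task that was running here immediately before us.
 */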

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
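
/*
 * Usage sketch (illustrative only, with a hypothetical lock word): the
 * atomic swap performed by xchg() is sufficient for a crude
 * test-and-set spinlock.
 *
 *	static volatile int lock;
 *
 *	while (xchg(&lock, 1) != 0)
 *		continue;			spin until we took 0 -> 1
 *	...critical section...
 *	smp_mb();				order the section before...
 *	lock = 0;				...the release store
 */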

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 3f					\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

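/*
 * The _local variants below mirror the functions above, but omit the
 * final smp_mb() and use plain local_irq_save(); they are atomic only
 * with respect to the current CPU.
 */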
static inline unsigned long __cmpxchg_u32_local(volatile int * m,
	unsigned long old, unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 3f					\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

static inline unsigned long __cmpxchg_u64_local(volatile int * m,
	unsigned long old, unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static inline unsigned long __cmpxchg_local(volatile void * ptr,
	unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), \
		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

#define cmpxchg_local(ptr,old,new) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
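
/*
 * Usage sketch (illustrative only, on a hypothetical int counter): the
 * classic cmpxchg() retry loop, here implementing an atomic increment.
 *
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */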

extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler (int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */