/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	     (!(KSTK_STATUS(prev) & ST0_CU1))) {			\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	/* Use the task_thread_info() accessor, consistent with the	\
	 * non-FPAFF variant below, instead of poking thread_info	\
	 * directly.							\
	 */								\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#else
/* Plain context switch: save/restore DSP state around resume(). */
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)
#endif

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
L
Linus Torvalds 已提交
84 85 86 87 88 89 90 91
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
92
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
93
		"1:	ll	%0, %3			# xchg_u32	\n"
94
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
95
		"	move	%2, %z4					\n"
96
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
97 98
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
99
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
100 101 102 103 104 105 106
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
107
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
108
		"1:	ll	%0, %3			# xchg_u32	\n"
109
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
110
		"	move	%2, %z4					\n"
111
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
112
		"	sc	%2, %1					\n"
113 114 115 116
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
117
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
118 119 120 121 122 123 124 125 126 127 128 129
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

130 131
	smp_mb();

L
Linus Torvalds 已提交
132 133 134
	return retval;
}

135
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
136 137 138 139 140 141 142 143
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
144
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
145 146 147 148
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
149
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
150 151 152 153 154 155 156
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
157
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
158 159 160
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
161 162 163 164
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
165
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
166 167 168 169 170 171 172 173 174 175 176 177
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

178 179
	smp_mb();

L
Linus Torvalds 已提交
180 181 182 183 184 185 186 187 188 189 190 191 192 193
	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

/*
 * Size-dispatch helper behind the xchg() macro: route to the 32- or
 * 64-bit implementation, or provoke a link error for any other size.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic 32-bit compare-and-exchange: if *m == old, store new; always
 * return the value *m held before the operation.  Uses $1 (at) as a
 * scratch register, hence ".set noat".  Same three paths as __xchg_u32;
 * the interrupt-disable fallback is only UP-safe.
 */
static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 3f					\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
/*
 * 64-bit counterpart of __cmpxchg_u32, using lld/scd.  Same structure
 * and barrier semantics; the interrupt-disable fallback is only UP-safe.
 */
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 3f					\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
/* 32-bit kernels have no 64-bit cmpxchg; force a link error if used. */
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

/*
 * Size-dispatch helper behind the cmpxchg() macro: route to the 32- or
 * 64-bit implementation, or provoke a link error for any other size.
 */
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

/* Exception/trap vector installation helpers, defined in arch code. */
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */