/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	     (!(KSTK_STATUS(prev) & ST0_CU1))) {			\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#else
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)
#endif
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
	/* Intentionally empty: no suitable whole-cache flush primitive has
	 * been wired up for MIPS yet (see TODO above). */
}

L
Linus Torvalds 已提交
84 85 86 87 88 89 90 91
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
92
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
93
		"1:	ll	%0, %3			# xchg_u32	\n"
94
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
95
		"	move	%2, %z4					\n"
96
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
97 98
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
99
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
100 101 102 103 104 105 106
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
107
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
108
		"1:	ll	%0, %3			# xchg_u32	\n"
109
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
110
		"	move	%2, %z4					\n"
111
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
112 113
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
114
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
115 116 117 118 119 120 121 122 123 124 125 126
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

127 128
	smp_mb();

L
Linus Torvalds 已提交
129 130 131
	return retval;
}

132
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
133 134 135 136 137 138 139 140
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
141
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
142 143 144 145
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
146
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
147 148 149 150 151 152 153
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
154
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
155 156 157 158
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
159
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
160 161 162 163 164 165 166 167 168 169 170 171
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

172 173
	smp_mb();

L
Linus Torvalds 已提交
174 175 176 177 178 179 180 181 182 183 184 185 186 187
	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

/*
 * __xchg - size-dispatching core of the xchg() macro.
 * @x:    new value
 * @ptr:  object to exchange
 * @size: sizeof(*ptr), supplied by the xchg() macro
 *
 * Only 4- and 8-byte objects are supported; anything else resolves to
 * the undefined __xchg_called_with_bad_pointer() and fails at link time.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG 1

/*
 * __cmpxchg_u32 - atomic 32-bit compare-and-exchange.
 * @m:   pointer to the word to update
 * @old: expected current value
 * @new: value to store if *m == old
 *
 * Returns the value of *m before the operation; the store happens only
 * when that value equals @old.  Uses $1 (the assembler temporary) as
 * scratch, hence ".set noat".  Same three runtime strategies as
 * __xchg_u32.
 */
static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"	/* branch-likely: R10000 workaround */
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
/*
 * __cmpxchg_u64 - atomic 64-bit compare-and-exchange.
 * @m:   pointer to the doubleword to update
 * @old: expected current value
 * @new: value to store if *m == old
 *
 * Returns the value of *m before the operation.  Mirrors __cmpxchg_u32
 * using lld/scd; only available on 64-bit kernels.
 */
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"	/* branch-likely: R10000 workaround */
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
/* Deliberately undefined: using cmpxchg() on a 64-bit quantity from a
   32-bit kernel produces a link-time error instead of silent breakage. */
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

/*
 * __cmpxchg - size-dispatching core of the cmpxchg() macro.
 *
 * Routes 4-byte objects to __cmpxchg_u32 and 8-byte objects to
 * __cmpxchg_u64; any other size resolves to the undefined
 * __cmpxchg_called_with_bad_pointer() and fails at link time.
 */
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	if (size == 4)
		return __cmpxchg_u32(ptr, old, new);
	if (size == 8)
		return __cmpxchg_u64(ptr, old, new);

	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

/* Exception/trap vector installation helpers (implemented in traps.c). */
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

/* MIPS does not randomize the stack top; keep it as passed in. */
#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */