/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/watch.h>
#include <asm/war.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

/* Software-emulated ll/sc state, maintained for CPUs without hardware ll/sc. */
extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 *
 * NOTE(review): this macro also references 'next', which is not one of its
 * parameters -- it relies on being expanded inside switch_to(), where
 * 'next' is in scope.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);		\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while(0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

/*
 * Clear the software-maintained ll/sc link bit (ll_bit) on a task switch,
 * so that an interrupted software-emulated ll/sc sequence in the outgoing
 * task cannot falsely succeed later.  Skipped entirely when cpu_has_llsc
 * is a compile-time constant true (hardware ll/sc needs no software bit).
 */
#define __clear_software_ll_bit()					\
do {									\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

/*
 * switch_to(prev, next, last): perform the architecture context switch
 * from 'prev' to 'next'.  Runs the MT FPU-affinity bookkeeping, saves the
 * outgoing task's DSP context if the CPU has a DSP ASE, clears the
 * software ll/sc bit, then calls the low-level resume(), whose return
 * value (the task we actually switched away from) is stored in 'last'.
 */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	__clear_software_ll_bit();					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)

/*
 * Run in the context of the incoming task right after the switch:
 * restore its DSP context (if the CPU has a DSP ASE), load its TLS
 * pointer into the UserLocal register where supported, and restore
 * any watch (hardware watchpoint) registers.
 */
#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
} while (0)
L
Linus Torvalds 已提交
93 94 95 96 97

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

98 99
	smp_mb__before_llsc();

100
	if (kernel_uses_llsc && R10000_LLSC_WAR) {
L
Linus Torvalds 已提交
101 102 103
		unsigned long dummy;

		__asm__ __volatile__(
104
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
105
		"1:	ll	%0, %3			# xchg_u32	\n"
106
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
107
		"	move	%2, %z4					\n"
108
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
109 110
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
111
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
112 113 114
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
115
	} else if (kernel_uses_llsc) {
L
Linus Torvalds 已提交
116 117
		unsigned long dummy;

118 119 120 121 122 123 124 125 126 127 128 129 130
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	ll	%0, %3		# xchg_u32	\n"
			"	.set	mips0				\n"
			"	move	%2, %z4				\n"
			"	.set	mips3				\n"
			"	sc	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
L
Linus Torvalds 已提交
131 132 133
	} else {
		unsigned long flags;

134
		raw_local_irq_save(flags);
L
Linus Torvalds 已提交
135 136
		retval = *m;
		*m = val;
137
		raw_local_irq_restore(flags);	/* implies memory barrier  */
L
Linus Torvalds 已提交
138 139
	}

140
	smp_llsc_mb();
141

L
Linus Torvalds 已提交
142 143 144
	return retval;
}

145
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
146 147 148 149
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

150 151
	smp_mb__before_llsc();

152
	if (kernel_uses_llsc && R10000_LLSC_WAR) {
L
Linus Torvalds 已提交
153 154 155
		unsigned long dummy;

		__asm__ __volatile__(
156
		"	.set	mips3					\n"
L
Linus Torvalds 已提交
157 158 159 160
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
161
		"	.set	mips0					\n"
L
Linus Torvalds 已提交
162 163 164
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
165
	} else if (kernel_uses_llsc) {
L
Linus Torvalds 已提交
166 167
		unsigned long dummy;

168 169 170 171 172 173 174 175 176 177 178
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	lld	%0, %3		# xchg_u64	\n"
			"	move	%2, %z4				\n"
			"	scd	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
L
Linus Torvalds 已提交
179 180 181
	} else {
		unsigned long flags;

182
		raw_local_irq_save(flags);
L
Linus Torvalds 已提交
183 184
		retval = *m;
		*m = val;
185
		raw_local_irq_restore(flags);	/* implies memory barrier  */
L
Linus Torvalds 已提交
186 187
	}

188
	smp_llsc_mb();
189

L
Linus Torvalds 已提交
190 191 192 193 194 195 196 197 198 199
	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/*
 * Size-dispatching helper for xchg().  Only 4- and 8-byte operands are
 * supported (8-byte only meaningful on 64-bit kernels); any other size
 * falls through and returns 'x' unchanged -- the xchg() macro rejects
 * such sizes at compile time via BUILD_BUG_ON.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}

	return x;
}

/*
 * Atomically exchange *ptr with x, evaluating to the old value of *ptr,
 * cast back to the pointed-to type.  The BUILD_BUG_ON rejects operand
 * sizes with any bit outside 0xc set, restricting xchg() to the 4- and
 * 8-byte operands that __xchg() actually handles.
 */
#define xchg(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
									\
	((__typeof__(*(ptr)))						\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
})

/* Exception/interrupt vector installation helpers (defined in traps.c). */
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* _ASM_SYSTEM_H */