/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>
#include <asm/compiler.h>

#define ___ssnop							\
	sll	$0, $0, 1

#define ___ehb								\
	sll	$0, $0, 3

/*
 * TLB hazards
 */
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
	!defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_LOONGSON3_ENHANCEMENT)

/*
 * MIPSR2 and MIPSR6 define ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard						\
	___ehb

#define __mtc0_tlbr_hazard						\
	___ehb

#define __tlbw_use_hazard						\
	___ehb

#define __tlb_read_hazard						\
	___ehb

#define __tlb_probe_hazard						\
	___ehb

#define __irq_enable_hazard						\
	___ehb

#define __irq_disable_hazard						\
	___ehb

#define __back_to_back_c0_hazard					\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set "MIPS_ISA_LEVEL"				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
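
/*
 * Illustrative sketch only (hypothetical call site, nothing below is defined
 * in this header): code that has just rewritten kernel text would typically
 * flush the caches and then discard anything already fetched into the
 * pipeline before executing the new instructions, roughly:
 *
 *	*(u32 *)addr = new_insn;			/ patch the instruction
 *	flush_icache_range(addr, addr + 4);		/ make it visible to fetch
 *	instruction_hazard();				/ kill stale prefetched copies
 */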

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */

#define __mtc0_tlbw_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __mtc0_tlbr_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlbw_use_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_read_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_probe_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2_r6)						\
		__instruction_hazard();					\
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
	defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historic reasons
 */
#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#else

/*
 * Finally, the catch-all case for all other processors, including R4000,
 * R4400, R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two-cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single-cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
#define __mtc0_tlbw_hazard						\
	nop;								\
	nop

#define __mtc0_tlbr_hazard						\
	nop;								\
	nop

#define __tlbw_use_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_read_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_probe_hazard						\
	nop;								\
	nop;								\
	nop

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __irq_disable_hazard						\
	nop;								\
	nop;								\
	nop

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define instruction_hazard() do { } while (0)

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

#define __enable_fpu_hazard						\
	.set	push;							\
	.set	mips64;							\
	.set	noreorder;						\
	___ssnop;							\
	bnezl	$0, .+4;						\
	___ssnop;							\
	.set	pop

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

#define __enable_fpu_hazard						\
	___ehb

#define __disable_fpu_hazard						\
	___ehb

#else

#define __enable_fpu_hazard						\
	nop;								\
	nop;								\
	nop;								\
	nop

#define __disable_fpu_hazard						\
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop ___ssnop
#define	_ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define mtc0_tlbr_hazard __mtc0_tlbr_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_read_hazard __tlb_read_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard
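
/*
 * Assembler-side usage sketch (illustrative only; the real call sites live
 * in the .S files under arch/mips): a coprocessor 0 write is followed by the
 * matching hazard barrier before a dependent CP0 access, e.g.
 *
 *	mtc0	k0, CP0_STATUS
 *	back_to_back_c0_hazard
 *	mfc0	k1, CP0_STATUS
 */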

#else

#define _ssnop()							\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ssnop)						\
	);								\
} while (0)

#define	_ehb()								\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ehb)						\
	);								\
} while (0)


#define mtc0_tlbw_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbw_hazard)					\
	);								\
} while (0)


#define mtc0_tlbr_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbr_hazard)					\
	);								\
} while (0)


#define tlbw_use_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlbw_use_hazard)					\
	);								\
} while (0)


#define tlb_read_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_read_hazard)					\
	);								\
} while (0)


#define tlb_probe_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_probe_hazard)					\
	);								\
} while (0)


#define irq_enable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_enable_hazard)				\
	);								\
} while (0)


#define irq_disable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_disable_hazard)				\
	);								\
} while (0)


#define back_to_back_c0_hazard() 					\
do {									\
	__asm__ __volatile__(						\
	__stringify(__back_to_back_c0_hazard)				\
	);								\
} while (0)


#define enable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__enable_fpu_hazard)				\
	);								\
} while (0)


#define disable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__disable_fpu_hazard)				\
	);								\
} while (0)
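
/*
 * C-side usage sketch (illustrative only; the accessors named below come
 * from <asm/mipsregs.h> and the real call sites live in arch/mips/mm): the
 * barriers bracket a TLB update roughly like this:
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();	/ EntryHi must be visible to the TLB write
 *	tlb_write_indexed();
 *	tlbw_use_hazard();	/ and the new entry to later translations
 */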

/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);

#endif /* __ASSEMBLY__  */

#endif /* _ASM_HAZARDS_H */