/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	 "
#define __EXT		"dext	 "
#endif
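
/*
 * Worked example of the decomposition used throughout this file: on a
 * 64-bit kernel, SZLONG_LOG == 6 and SZLONG_MASK == 63UL, so bit nr = 70
 * lives in word (70 >> SZLONG_LOG) == 1 of the bitmap, at offset
 * (70 & SZLONG_MASK) == 6 within that word.
 */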

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
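
/*
 * The LL/SC sequence in set_bit() above is, in spirit, the retry loop
 * below (an illustrative C sketch only; the real sequence must stay in
 * assembly so the ll/sc pair covers the whole read-modify-write, and
 * store_conditional() is a hypothetical stand-in for the sc instruction,
 * which fails whenever another CPU wrote the word between ll and sc):
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = *m;			// ll: load-linked
 *		new = old | (1UL << bit);	// modify
 *	} while (!store_conditional(m, new));	// sc: store-conditional
 */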

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
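
/*
 * Ordering sketch for the note above (MY_IN_PROGRESS_BIT and state are
 * hypothetical names): because clear_bit() itself contains no barrier,
 * bracket it with the barrier helpers when it publishes completion to
 * another CPU:
 *
 *	smp_mb__before_atomic();
 *	clear_bit(MY_IN_PROGRESS_BIT, &state);
 *	smp_mb__after_atomic();
 */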

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
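
/*
 * Usage sketch (flag and helper names are hypothetical): the returned
 * old value lets exactly one caller win a race, e.g. to avoid queuing
 * duplicate work:
 *
 *	if (!test_and_set_bit(MY_WORK_SCHEDULED, &flags))
 *		queue_my_work();
 */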

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
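
/*
 * Paired with clear_bit_unlock() this yields a simple bit spinlock, the
 * same pattern the generic bit_spin_lock() helpers are built on (the
 * bit and word names here are hypothetical):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */
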
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
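
/*
 * Usage sketch (hypothetical names): atomically consume a pending flag
 * so that exactly one CPU acts on it:
 *
 *	if (test_and_clear_bit(MY_PENDING_BIT, &flags))
 *		handle_pending_event();
 */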

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * __fls - find last (most significant) set bit in a word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
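
/*
 * Worked example: __fls(0x90) == 7, since bit 7 is the most significant
 * set bit; on the software path above, @word is shifted left while num
 * counts down until the top bit is set.
 */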

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
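
/*
 * Worked example: word & -word isolates the lowest set bit by two's
 * complement arithmetic, so for word == 0x18 (bits 3 and 4 set),
 * word & -word == 0x08 and __ffs(0x18) == __fls(0x08) == 3.
 */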

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
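
/*
 * Worked example of the 1-based convention shared by ffs() and fls():
 * for x == 0x18, ffs(x) == 4 (lowest set bit 3, plus one) and
 * fls(x) == 5 (highest set bit 4, plus one), while ffs(0) == fls(0) == 0.
 */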

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */