/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
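
/*
 * Illustrative example (an addition, not part of the original file):
 * with the numbering described above, the generic expression and the
 * helper below address the very same bit, e.g.
 *
 *	unsigned long flags = 0;
 *
 *	flags |= 1UL << 5;	(generic code path)
 *	set_bit(5, &flags);	(atomic helper, same bit)
 *
 * The tables above merely describe where that bit lands in byte-wise
 * memory order on a big endian machine.
 */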

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#else /* CONFIG_64BIT */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __BITOPS_OR		"laog"
#define __BITOPS_AND		"lang"
#define __BITOPS_XOR		"laxg"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old;					\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		__op_string "	%0,%2,%1\n"			\
		: "=d" (__old),	"+Q" (*(__addr))		\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#endif /* CONFIG_64BIT */
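
/*
 * Sketch (illustrative only, not compilable kernel code) of the
 * semantics __BITOPS_LOOP implements: atomically apply an operation
 * to *__addr and return the old value.  The pre-z196 variants spell
 * this as a compare-and-swap retry loop:
 *
 *	old = *addr;
 *	do {
 *		new = old <op> val;
 *	} while (cs(addr, &old, new) failed);
 *	return old;
 *
 * The z196 variant needs no loop because laog/lang/laxg perform the
 * load-and-<op> in a single interlocked instruction.
 */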

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned char *
__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}
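
/*
 * Worked example (illustrative, 64 bit): for nr = 70, __bitops_word()
 * yields (70 ^ (70 & 63)) >> 3 = 8, i.e. the second long of the
 * bitmap, and __bitops_byte() yields (70 ^ 56) >> 3 = 15, i.e. the
 * last byte of that big endian long - the byte holding bits 64..71.
 */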

static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_OR);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	__BITOPS_LOOP(addr, mask, __BITOPS_AND);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}

static inline int
test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
	barrier();
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
	barrier();
	return (old & ~mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
	barrier();
	return (old & mask) != 0;
}
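
/*
 * Usage note (illustrative): the atomic test_and_* variants make a
 * simple claim flag race free, e.g. with a made-up bit number:
 *
 *	if (!test_and_set_bit(MY_BUSY_BIT, &flags))
 *		(we won the race and own the resource)
 *
 * The non-atomic __test_and_* versions below give the same result
 * but are only safe for callers that exclude concurrent updates.
 */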

static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr |= 1 << (nr & 7);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr &= ~(1 << (nr & 7));
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr ^= 1 << (nr & 7);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr |= 1 << (nr & 7);
	return (ch >> (nr & 7)) & 1;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr &= ~(1 << (nr & 7));
	return (ch >> (nr & 7)) & 1;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr ^= 1 << (nr & 7);
	return (ch >> (nr & 7)) & 1;
}

static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	const volatile unsigned char *addr;

	addr = ((const volatile unsigned char *)ptr);
	addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
	return (*addr >> (nr & 7)) & 1;
}

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
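
/*
 * Worked example (illustrative): __ffz_word(0, 0xffUL) sees the low
 * byte all ones, shifts by 8 and returns 8 + _zb_findmap[0] = 8, the
 * number of the first zero bit.  Likewise __ffs_word(0, 0x1000UL)
 * skips the zero low byte and returns 8 + _sb_findmap[0x10] = 12.
 */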

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}
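
/*
 * Illustrative: for the memory bytes 01 02 03 04 05 06 07 08 the big
 * endian load returns 0x0102030405060708, while __load_ulong_le()
 * returns the byte reversed value 0x0807060504030201 (64 bit case).
 */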

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
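
/*
 * Illustrative: for the word 0x10, __ffs() returns 4 (zero based)
 * and ffs() returns 5 (one based, libc convention), while ffz(~0x10)
 * also returns 4 - three views of the same bit.
 */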

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/*
 * Big endian variant which starts bit counting from left using
 * the flogr (find leftmost one) instruction.
 */
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
	register unsigned long bit asm("2") = val;
	register unsigned long out asm("3");

	asm volatile (
		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
	return nr + bit;
}
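
/*
 * Illustrative: flogr numbers bits from the leftmost (most
 * significant) position, so __flo_word(0, 1UL << 63) returns 0 and
 * __flo_word(0, 1UL) returns 63 - the mirror image of the
 * __ffs_word() numbering used elsewhere in this file.
 */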

/*
 * 64 bit special left bitops format:
 * order in memory:
 *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
 *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
 *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
 * after that follows the next long with bit numbers
 *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
 *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
 *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
 *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
 * The reason for this bit ordering is the fact that
 * the hardware sets bits in a bitmap starting at bit 0
 * and we don't want to scan the bitmap from the 'wrong
 * end'.
 */
static inline unsigned long find_first_bit_left(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

static inline int find_next_bit_left(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		set = __flo_word(0, *p & (~0UL >> bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_left(p, size);
}

#define for_each_set_bit_left(bit, addr, size)				\
	for ((bit) = find_first_bit_left((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_left_cont(bit, addr, size)			\
	for ((bit) = find_next_bit_left((addr), (size), (bit));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffz_word returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit(const unsigned long *addr,
				unsigned long size,
				unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffs_word returns BITS_PER_LONG
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit
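
/*
 * Typical scan loop (illustrative), visiting every set bit of a
 * bitmap that is nbits bits long:
 *
 *	for (i = find_first_bit(bmap, nbits); i < nbits;
 *	     i = find_next_bit(bmap, nbits, i + 1))
 *		(bit i is set)
 */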

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
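
/*
 * Illustrative: in this little endian layout bit 0 is the LSB of the
 * first byte in memory, so for a buffer whose first byte is 0x02 the
 * first set bit is bit 1 and find_first_bit_le() would return 1.
 */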

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					  unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffz_word returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				     unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffs_word returns BITS_PER_LONG
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */