/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
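
/*
 * Illustration (not part of the original comment): under the 64 bit
 * format above, set_bit(0, addr) sets the LSB of the first long,
 * which lives in the *last* byte of that long in memory, and
 * set_bit(7, addr) sets the MSB of the same byte.  Hence
 * "flags |= 1UL << 7" and set_bit(7, &flags) hit the same bit.
 */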

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#else /* CONFIG_64BIT */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __BITOPS_OR		"laog"
#define __BITOPS_AND		"lang"
#define __BITOPS_XOR		"laxg"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old;					\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		__op_string "	%0,%2,%1\n"			\
		: "=d" (__old),	"+Q" (*(__addr))		\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})
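
/*
 * Note (illustrative): with the interlocked-access facility the
 * macro above emits a single instruction, e.g.
 * __BITOPS_LOOP(addr, mask, __BITOPS_OR) becomes one "laog" that
 * atomically ORs mask into *addr and returns the old value, with
 * no compare-and-swap retry loop.
 */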

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned char *
__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}
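
/*
 * Worked example (64 bit, for illustration): for nr = 70,
 * __bitops_word() adds (70 & ~63) / 8 = 8 bytes and returns the
 * address of the second long; __bitops_byte() computes
 * (70 ^ 56) >> 3 = 15, the last byte of that long, where bit 70
 * sits at bit position 70 & 7 = 6 due to the big endian layout.
 */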

static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
	if (__builtin_constant_p(nr)) {
		unsigned char *caddr = __bitops_byte(nr, ptr);

		asm volatile(
			"oi	%0,%b1\n"
			: "+Q" (*caddr)
			: "i" (1 << (nr & 7))
			: "cc");
		return;
	}
#endif
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_OR);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
	if (__builtin_constant_p(nr)) {
		unsigned char *caddr = __bitops_byte(nr, ptr);

		asm volatile(
			"ni	%0,%b1\n"
			: "+Q" (*caddr)
			: "i" (~(1 << (nr & 7)))
			: "cc");
		return;
	}
#endif
	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	__BITOPS_LOOP(addr, mask, __BITOPS_AND);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
	if (__builtin_constant_p(nr)) {
		unsigned char *caddr = __bitops_byte(nr, ptr);

		asm volatile(
			"xi	%0,%b1\n"
			: "+Q" (*caddr)
			: "i" (1 << (nr & 7))
			: "cc");
		return;
	}
#endif
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}

static inline int
test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
	barrier();
	return (old & mask) != 0;
}
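
/*
 * Usage sketch (illustrative; "initialized" and "do_init_once"
 * are made-up names):
 *
 *	if (!test_and_set_bit(0, &initialized))
 *		do_init_once();
 *
 * Only the first caller sees the bit clear; all others get 1.
 */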

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
	barrier();
	return (old & ~mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
	barrier();
	return (old & mask) != 0;
}

static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr |= 1 << (nr & 7);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr &= ~(1 << (nr & 7));
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr ^= 1 << (nr & 7);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr |= 1 << (nr & 7);
	return (ch >> (nr & 7)) & 1;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr &= ~(1 << (nr & 7));
	return (ch >> (nr & 7)) & 1;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr ^= 1 << (nr & 7);
	return (ch >> (nr & 7)) & 1;
}

static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	const volatile unsigned char *addr;

	addr = ((const volatile unsigned char *)ptr);
	addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
	return (*addr >> (nr & 7)) & 1;
}

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
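
/*
 * Example (illustrative, 64 bit): for a 128 bit map whose first
 * long is all ones, the loop compares that long against -1UL,
 * advances by 8 bytes and returns 8; the caller then inspects the
 * remaining long via __load_ulong_be()/__ffz_word().
 */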

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}
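
/*
 * Example trace (illustrative): __ffz_word(0, 0xffffUL) skips the
 * 32 bit test, shifts right by 16 (nr = 16), skips the 8 bit test
 * since the low byte is now 0 and returns 16 + _zb_findmap[0],
 * i.e. 16 -- the first zero bit of 0xffff (_zb_findmap[0] is 0).
 */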

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
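
/*
 * Example trace (illustrative): __ffs_word(0, 0x100UL) passes the
 * 32 and 16 bit tests unchanged, shifts right by 8 because the low
 * byte is zero (nr = 8) and returns 8 + _sb_findmap[1], i.e. 8,
 * the position of the only set bit.
 */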

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}
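
/*
 * Example (illustrative, 64 bit): for the memory bytes
 * 01 00 00 00 00 00 00 00 a plain big endian load yields
 * 0x0100000000000000UL, while __load_ulong_le() byte-swaps
 * and yields 1UL.
 */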

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
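
/*
 * Examples (illustrative): ffs(0) == 0, ffs(1) == 1 and
 * ffs(0x8000) == 16; numbering starts at 1, unlike __ffs()
 * above, which returns 15 for the word 0x8000.
 */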

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit
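
/*
 * Usage sketch (illustrative, made-up bitmap):
 *
 *	unsigned long map[2] = { ~0UL, ~0UL ^ (1UL << 3) };
 *
 * find_first_zero_bit(map, 128) returns 67 (bit 3 of the second
 * long); if every bit in the range is set, size is returned.
 */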

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/*
 * Big endian variant which starts bit counting from left using
 * the flogr (find leftmost one) instruction.
 */
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
	register unsigned long bit asm("2") = val;
	register unsigned long out asm("3");

	asm volatile (
		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
	return nr + bit;
}

/*
 * 64 bit special left bitops format:
 * order in memory:
 *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
 *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
 *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
 * after that follows the next long with bit numbers
 *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
 *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
 *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
 *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
 * The reason for this bit ordering is the fact that
 * the hardware sets bits in a bitmap starting at bit 0
 * and we don't want to scan the bitmap from the 'wrong
 * end'.
 */
static inline unsigned long find_first_bit_left(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
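
/*
 * Contrast (illustrative): for a bitmap whose first byte is 0x80,
 * find_first_bit_left() returns 0 (the leftmost bit of the first
 * byte), while the LSB-first find_first_bit() above returns 63
 * for the same 64 bit word.
 */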

static inline int find_next_bit_left(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		set = __flo_word(0, *p & (~0UL >> bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_left(p, size);
}

#define for_each_set_bit_left(bit, addr, size)				\
	for ((bit) = find_first_bit_left((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_left_cont(bit, addr, size)			\
	for ((bit) = find_next_bit_left((addr), (size), (bit));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffz_word returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffs_word returns BITS_PER_LONG
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
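
/*
 * Example (illustrative): in this convention a buffer whose first
 * byte is 0x02 has bit 1 set, so find_first_bit_le() returns 1;
 * the native big endian helpers above would report bit 57 of the
 * first long for the same memory contents.
 */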

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					  unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * s390 version of ffz returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				     unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * s390 version of ffs returns BITS_PER_LONG
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */