#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef	__KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)
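
/*
 * Example (illustrative, not part of the original header; assumes
 * BITS_PER_LONG == 64): bit 70 of a bitmap lives in word
 * BIT_WORD(70) == 1 under mask BIT_MASK(70) == (1UL << 6), and a
 * 70-bit bitmap needs BITS_TO_LONGS(70) == 2 longs of storage.
 */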
#define BITS_PER_BYTE		8
#endif /* __KERNEL__ */

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
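
/*
 * Usage sketch (illustrative, not from the original header; assumes the
 * bitmap helpers from <linux/types.h> and <linux/bitmap.h>):
 *
 *	DECLARE_BITMAP(map, 64);
 *	unsigned long bit;
 *
 *	bitmap_zero(map, 64);
 *	__set_bit(3, map);
 *	__set_bit(42, map);
 *	for_each_bit(bit, map, 64)
 *		printk(KERN_DEBUG "bit %lu set\n", bit);
 *
 * which prints bit 3, then bit 42.
 */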


static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
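
/*
 * Worked examples (illustrative): fls() returns the 1-based position of
 * the most significant set bit, so get_bitmask_order(8) == 4, while
 * get_count_order() returns ceil(log2(count)): get_count_order(8) == 3
 * and get_count_order(9) == 4.
 */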

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
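
/*
 * Example (illustrative): hweight counts the set bits in a word, so
 * hweight_long(0xf0UL) == 4 and hweight_long(0UL) == 0.
 */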

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	/* Mask both shift counts so a rotate by zero bits is well defined. */
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	/* Mask both shift counts so a rotate by zero bits is well defined. */
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
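
/*
 * Example (illustrative): rol32(0x80000001, 1) == 0x00000003 and
 * ror32(0x00000003, 1) == 0x80000001; a rotate, unlike a shift,
 * loses no bits.
 */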

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}
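
/*
 * Example (illustrative): the narrower variants behave the same way,
 * e.g. rol8(0x81, 1) == 0x03 and ror16(0x0001, 1) == 0x8000.
 */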

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
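
/*
 * Example (illustrative): fls_long() returns the 1-based position of the
 * most significant set bit of a long, so fls_long(0x10UL) == 5 and
 * fls_long(0UL) == 0.
 */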

#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
extern unsigned long __find_next_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 */
static __always_inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size,
		unsigned long offset)
{
	unsigned long value;

	/*
	 * Avoid a function call if the bitmap size is a constant
	 * and not bigger than BITS_PER_LONG.
	 *
	 * Insert a sentinel bit at position size so that __ffs()
	 * returns size if there are no set bits in the bitmap.
	 */
	if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
		value = (*addr) & ((~0ul) << offset);
		value |= (1ul << size);
		return __ffs(value);
	}

	/*
	 * The result of __ffs(0) is undefined, so it needs to be
	 * handled separately.
	 */
	if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
		value = (*addr) & ((~0ul) << offset);
		return (value == 0) ? BITS_PER_LONG : __ffs(value);
	}

	/* size is not constant or too big */
	return __find_next_bit(addr, size, offset);
}
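
/*
 * Example (illustrative): with *addr == 0x09UL (bits 0 and 3 set),
 * find_next_bit(addr, BITS_PER_LONG, 1) returns 3, and
 * find_next_bit(addr, BITS_PER_LONG, 4) returns BITS_PER_LONG,
 * meaning no further set bit exists.
 */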

extern unsigned long __find_next_zero_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 */
static __always_inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
		unsigned long offset)
{
	unsigned long value;

	/*
	 * Avoid a function call if the bitmap size is a constant
	 * and not bigger than BITS_PER_LONG.
	 *
	 * Insert a sentinel bit at position size so that __ffs()
	 * returns size if there are no zero bits in the bitmap.
	 */
	if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
		value = (~(*addr)) & ((~0ul) << offset);
		value |= (1ul << size);
		return __ffs(value);
	}

	/*
	 * The result of __ffs(0) is undefined, so it needs to be
	 * handled separately.
	 */
	if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
		value = (~(*addr)) & ((~0ul) << offset);
		return (value == 0) ? BITS_PER_LONG : __ffs(value);
	}

	/* size is not constant or too big */
	return __find_next_zero_bit(addr, size, offset);
}
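
/*
 * Example (illustrative): with *addr == ~0x04UL (only bit 2 clear),
 * find_next_zero_bit(addr, BITS_PER_LONG, 0) returns 2, and
 * find_next_zero_bit(addr, BITS_PER_LONG, 3) returns BITS_PER_LONG.
 */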
#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */