/*
 * include/asm-v850/bitops.h -- Bit operations
 *
 *  Copyright (C) 2001,02,03,04,05  NEC Electronics Corporation
 *  Copyright (C) 2001,02,03,04,05  Miles Bader <miles@gnu.org>
 *  Copyright (C) 1992  Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 */

#ifndef __V850_BITOPS_H__
#define __V850_BITOPS_H__


#include <linux/config.h>
#include <linux/compiler.h>	/* unlikely  */
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* interrupt enable/disable */


#ifdef __KERNEL__

/*
 * The __ functions are not atomic
 */

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz (unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
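
/* Example (illustrative, not part of the original header): since ffz is
 * undefined when no zero bit exists, callers guard against ~0UL first:
 *
 *	unsigned long word = 0x0000000bUL;	// binary ...1011
 *	if (word != ~0UL)
 *		pos = ffz (word);		// pos == 2, the lowest 0 bit
 */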


/* In the following constant-bit-op macros, a "g" constraint is used when
   we really need an integer ("i" constraint).  This is to avoid
   warnings/errors from the compiler in the case where the associated
   operand _isn't_ an integer, and shouldn't produce bogus assembly because
   use of that form is protected by a guard statement that checks for
   constants, and should otherwise be removed by the optimizer.  This
   _usually_ works -- however, __builtin_constant_p returns true for a
   variable with a known constant value too, and unfortunately gcc will
   happily put the variable in a register and use the register for the "g"
   constraint'd asm operand.  To avoid the latter problem, we add a
   constant offset to the operand and subtract it back in the asm code;
   forcing gcc to do arithmetic on the value is usually enough to get it
   to use a real constant value.  This is horrible, and ultimately
   unreliable too, but it seems to work for now (hopefully gcc will offer
   us more control in the future, so we can do a better job).  */

#define __const_bit_op(op, nr, addr)					\
  ({ __asm__ (op " (%0 - 0x123), %1"					\
	      :: "g" (((nr) & 0x7) + 0x123),				\
		 "m" (*((char *)(addr) + ((nr) >> 3)))			\
	      : "memory"); })
#define __var_bit_op(op, nr, addr)					\
  ({ int __nr = (nr);							\
     __asm__ (op " %0, [%1]"						\
	      :: "r" (__nr & 0x7),					\
		 "r" ((char *)(addr) + (__nr >> 3))			\
	      : "memory"); })
#define __bit_op(op, nr, addr)						\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_bit_op (op, nr, addr)					\
   : __var_bit_op (op, nr, addr))

#define __set_bit(nr, addr)		__bit_op ("set1", nr, addr)
#define __clear_bit(nr, addr)		__bit_op ("clr1", nr, addr)
#define __change_bit(nr, addr)		__bit_op ("not1", nr, addr)
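
/* Expansion sketch (illustrative): for a constant bit number the single-
 * instruction form is used directly; e.g. __set_bit (10, addr) computes
 * (10 & 0x7) + 0x123 = 0x125 as the "g" operand, and the asm subtracts the
 * 0x123 back, emitting roughly `set1 2, 1[addr]' -- bit 2 of the second
 * byte.  A non-constant nr falls through to the register form instead.
 */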

/* The bit instructions used by `non-atomic' variants are actually atomic.  */
#define set_bit __set_bit
#define clear_bit __clear_bit
#define change_bit __change_bit


#define __const_tns_bit_op(op, nr, addr)				      \
  ({ int __tns_res;							      \
     __asm__ __volatile__ (						      \
	     "tst1 (%1 - 0x123), %2; setf nz, %0; " op " (%1 - 0x123), %2"    \
	     : "=&r" (__tns_res)					      \
	     : "g" (((nr) & 0x7) + 0x123),				      \
	       "m" (*((char *)(addr) + ((nr) >> 3)))			      \
	     : "memory");						      \
     __tns_res;								      \
  })
#define __var_tns_bit_op(op, nr, addr)					      \
  ({ int __nr = (nr);							      \
     int __tns_res;							      \
     __asm__ __volatile__ (						      \
	     "tst1 %1, [%2]; setf nz, %0; " op " %1, [%2]"		      \
	      : "=&r" (__tns_res)					      \
	      : "r" (__nr & 0x7),					      \
		"r" ((char *)(addr) + (__nr >> 3))			      \
	      : "memory");						      \
     __tns_res;								      \
  })
#define __tns_bit_op(op, nr, addr)					\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_tns_bit_op (op, nr, addr)					\
   : __var_tns_bit_op (op, nr, addr))
#define __tns_atomic_bit_op(op, nr, addr)				\
  ({ int __tns_atomic_res, __tns_atomic_flags;				\
     local_irq_save (__tns_atomic_flags);				\
     __tns_atomic_res = __tns_bit_op (op, nr, addr);			\
     local_irq_restore (__tns_atomic_flags);				\
     __tns_atomic_res;							\
  })

#define __test_and_set_bit(nr, addr)	__tns_bit_op ("set1", nr, addr)
#define test_and_set_bit(nr, addr)	__tns_atomic_bit_op ("set1", nr, addr)

#define __test_and_clear_bit(nr, addr)	__tns_bit_op ("clr1", nr, addr)
#define test_and_clear_bit(nr, addr)	__tns_atomic_bit_op ("clr1", nr, addr)

#define __test_and_change_bit(nr, addr)	__tns_bit_op ("not1", nr, addr)
#define test_and_change_bit(nr, addr)	__tns_atomic_bit_op ("not1", nr, addr)
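
/* Usage sketch (illustrative, with a hypothetical `busy' bitmap): the
 * returned value is the *old* bit, so a successful claim reads back zero:
 *
 *	static unsigned long busy[1];
 *	...
 *	if (!test_and_set_bit (0, busy)) {
 *		// bit was clear and is now set; the resource is ours
 *	}
 */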


#define __const_test_bit(nr, addr)					      \
  ({ int __test_bit_res;						      \
     __asm__ __volatile__ ("tst1 (%1 - 0x123), %2; setf nz, %0"		      \
			   : "=r" (__test_bit_res)			      \
			   : "g" (((nr) & 0x7) + 0x123),		      \
			     "m" (*((const char *)(addr) + ((nr) >> 3))));    \
     __test_bit_res;							      \
  })
static inline int __test_bit (int nr, const void *addr)
{
	int res;
	__asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0"
			      : "=r" (res)
			      : "r" (nr & 0x7), "r" (addr + (nr >> 3)));
	return res;
}
#define test_bit(nr,addr)						\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_test_bit ((nr), (addr))					\
   : __test_bit ((nr), (addr)))
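
/* Example (illustrative, `status' is a hypothetical bitmap): test_bit only
 * reads, so no interrupt protection is involved:
 *
 *	if (test_bit (5, &status))
 *		...			// bit 5 was set when sampled
 */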


/* clear_bit doesn't provide any barrier for the compiler.  */
#define smp_mb__before_clear_bit()	barrier ()
#define smp_mb__after_clear_bit()	barrier ()


#define find_first_zero_bit(addr, size) \
  find_next_zero_bit ((addr), (size), 0)

static inline int find_next_zero_bit(const void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = * (p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~ (tmp = * (p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

 found_first:
	tmp |= ~0UL << size;
 found_middle:
	return result + ffz (tmp);
}
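
/* Usage sketch (illustrative, with a hypothetical allocation map where a
 * clear bit means "free"):
 *
 *	unsigned long map[2] = { 0 };		// 64-bit bitmap
 *	int slot = find_first_zero_bit (map, 64);
 *	if (slot < 64)
 *		__set_bit (slot, map);		// claim the slot
 */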


/* This is the same as generic_ffs, but we can't use that because it's
   inline and the #include order mucks things up.  */
static inline int generic_ffs_for_find_next_bit(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}

/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)        /* Are any bits set? */
		return result + size; /* Nope. */
found_middle:
	return result + generic_ffs_for_find_next_bit(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
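
/* Usage sketch (illustrative): visiting every set bit -- the open-coded
 * form of what later kernels wrap as for_each_*_bit helpers:
 *
 *	for (bit = find_first_bit (map, size);
 *	     bit < size;
 *	     bit = find_next_bit (map, size, bit + 1))
 *		process (bit);	// `process', `map', `size' hypothetical
 */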


#define ffs(x) generic_ffs (x)
#define fls(x) generic_fls (x)
#define fls64(x) generic_fls64(x)
#define __ffs(x) ffs(x)


/*
 * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes
 * that at least one bit is set, and returns the real index of the bit
 * (rather than the bit index + 1, like ffs does).
 */
static inline int sched_ffs(int x)
{
	int r = 0;

	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	unsigned offs = 0;
	while (! *b) {
		b++;
		offs += 32;
	}
	return sched_ffs (*b) + offs;
}
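
/* Worked example (illustrative): with a 140-bit priority bitmap stored in
 * five 32-bit words, a runnable task at priority 100 sets word 3, bit 4:
 *
 *	unsigned long prio_bitmap[5] = { 0, 0, 0, 1 << 4, 0 };
 *	// sched_find_first_bit skips three zero words (offs = 96),
 *	// then sched_ffs (1 << 4) == 4, giving 96 + 4 == 100.
 */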

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight32(x) 			generic_hweight32 (x)
#define hweight16(x) 			generic_hweight16 (x)
#define hweight8(x) 			generic_hweight8 (x)
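
/* Example (illustrative): hweight8 (0xb5) == 5, since 0xb5 is 10110101
 * in binary and five of those eight bits are set.
 */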

#define ext2_set_bit			__test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)      test_and_set_bit(n,a)
#define ext2_clear_bit			__test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a)    test_and_clear_bit(n,a)
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit		__test_and_set_bit
#define minix_set_bit			__set_bit
#define minix_test_and_clear_bit	__test_and_clear_bit
#define minix_test_bit 			test_bit
#define minix_find_first_zero_bit 	find_first_zero_bit

#endif /* __KERNEL__ */

#endif /* __V850_BITOPS_H__ */