bitops_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_BITOPS_32_H
#define _ASM_TILE_BITOPS_32_H

#include <linux/compiler.h>
#include <asm/barrier.h>

/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
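
/*
 * Illustrative note (editorial, not from the original header): each
 * _atomic_fetch_*() helper is assumed to atomically apply its operation
 * to the word at @p and return that word's previous value.  The
 * test_and_*_bit() routines below rely on this to recover the old bit:
 *
 *	old_word = _atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
 *	old_bit  = (old_word & BIT_MASK(nr)) != 0;
 */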

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.
 * See __set_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
}
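
/*
 * Usage sketch (illustrative only; "my_flags" and MY_PENDING_BIT are
 * hypothetical names, not part of this header):
 *
 *	static unsigned long my_flags;
 *	#define MY_PENDING_BIT	3
 *
 *	set_bit(MY_PENDING_BIT, &my_flags);
 *
 * The update is atomic with respect to other *_bit() callers, so no
 * separate lock is needed just to set the flag.
 */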

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.
 * See __clear_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * clear_bit() may not contain a memory barrier, so if it is used for
 * locking purposes, you should call smp_mb__before_atomic() and/or
 * smp_mb__after_atomic() to ensure changes are visible on other cpus.
 */
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
}
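
/*
 * Illustrative sketch of the barrier note above (LOCK_BIT and "lockword"
 * are hypothetical names): a caller using clear_bit() to release a
 * lock-like flag orders its earlier stores explicitly:
 *
 *	smp_mb__before_atomic();
 *	clear_bit(LOCK_BIT, &lockword);
 */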

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * See __change_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* full barrier required: this op implies a memory barrier */
	return (_atomic_fetch_or(addr, mask) & mask) != 0;
}
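
/*
 * Usage sketch (illustrative only; MY_BUSY_BIT and "my_flags" are
 * hypothetical names): because the old value is returned, this gives a
 * simple try-acquire pattern.  If the bit was already set, another
 * caller owns the resource:
 *
 *	if (test_and_set_bit(MY_BUSY_BIT, &my_flags))
 *		return -EBUSY;
 *	...do the work...
 *	smp_mb__before_atomic();
 *	clear_bit(MY_BUSY_BIT, &my_flags);
 */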

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* full barrier required: this op implies a memory barrier */
	return (_atomic_fetch_andn(addr, mask) & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* full barrier required: this op implies a memory barrier */
	return (_atomic_fetch_xor(addr, mask) & mask) != 0;
}

#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_TILE_BITOPS_32_H */