/*
 * Read-Copy Update mechanism for mutual exclusion (classic version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * 		Documentation/RCU
 *
 */

#ifndef __LINUX_RCUCLASSIC_H
#define __LINUX_RCUCLASSIC_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ) /* for rcp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
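/*
 * Illustrative sketch (the stall-detection logic itself lives in
 * kernel/rcuclassic.c, not in this header): when a grace period starts,
 * the detector records the start time and arms a deadline; if the
 * deadline expires with the grace period still incomplete, a stall is
 * reported and the longer recheck interval is armed:
 *
 *	rcp->gp_start = jiffies;
 *	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
 *	...
 *	if (time_after(jiffies, rcp->jiffies_stall)) {
 *		(report the stall)
 *		rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
 *	}
 */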

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.                      */
	long	completed;	/* Number of the last completed batch         */
	long	pending;	/* Number of the last pending batch           */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	unsigned long gp_start;	/* Time at which GP started in jiffies. */
	unsigned long jiffies_stall;
				/* Time at which to check for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

	int	signaled;	/* Resched IPIs sent to force quiescent states? */

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
					  /* current batch to proceed.     */
} ____cacheline_internodealigned_in_smp;
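/*
 * Grace-period bookkeeping sketch (illustrative; cf. the cpu_quiet()
 * logic in kernel/rcuclassic.c): each CPU clears its bit in ->cpumask
 * once it has passed through a quiescent state, and the current batch
 * completes when the mask empties:
 *
 *	cpu_clear(cpu, rcp->cpumask);
 *	if (cpus_empty(rcp->cpumask))
 *		rcp->completed = rcp->cur;
 */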

/* Is batch a before batch b? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}
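/*
 * Example (illustrative): given the kernel's two's-complement wrapping
 * arithmetic, the signed difference orders batch numbers correctly even
 * across the wrap at LONG_MAX:
 *
 *	long a = LONG_MAX;	(assigned first)
 *	long b = a + 1;		(wraps to LONG_MIN)
 *
 * rcu_batch_before(a, b) returns 1 because (a - b) == -1 < 0, and
 * rcu_batch_after(b, a) returns 1 because (b - a) == 1 > 0, even
 * though numerically b < a.
 */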

/* Per-CPU data for Read-Copy Update. */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;     /* Batch # for grace period */
	int		passed_quiesc;	 /* User-mode/idle loop etc. */
	int		qs_pending;	 /* core waits for quiesc state */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, then:
	 * batch:
	 *	The batch # for the last entry of nxtlist
	 * [*nxttail[1], NULL = *nxttail[2]):
	 *	Entries whose batch # <= batch
	 * [*nxttail[0], *nxttail[1]):
	 *	Entries whose batch # <= batch - 1
	 * [nxtlist, *nxttail[0]):
	 *	Entries whose batch # <= batch - 2
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 */
	long		batch;
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[3];
	long		qlen;		 /* # of queued callbacks */
	struct rcu_head *donelist;
	struct rcu_head **donetail;
	long		blimit;		 /* Upper limit on a processed batch */
	int cpu;
	struct rcu_head barrier;
};
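/*
 * Enqueue sketch (illustrative; the real code is __call_rcu() in
 * kernel/rcuclassic.c): a new callback joins the not-yet-batched
 * segment by being linked in at *nxttail[2], after which that tail
 * pointer is advanced:
 *
 *	head->next = NULL;
 *	*rdp->nxttail[2] = head;
 *	rdp->nxttail[2] = &head->next;
 *	rdp->qlen++;
 */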

/*
 * Increment the quiescent state counter.
 * The counter is a bit degenerate: we do not need to know how many
 * quiescent states have passed, just whether there has been at least
 * one since the start of the grace period. Thus it is effectively
 * just a flag.
 */
extern void rcu_qsctr_inc(int cpu);
extern void rcu_bh_qsctr_inc(int cpu);
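/*
 * Implementation sketch (illustrative; the real bodies are in
 * kernel/rcuclassic.c): recording a quiescent state amounts to setting
 * the per-CPU flag described above, e.g.
 *
 *	per_cpu(rcu_data, cpu).passed_quiesc = 1;
 */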

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	\
			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif
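/*
 * Sketch (illustrative): under CONFIG_DEBUG_LOCK_ALLOC, lockdep sees
 * every RCU read-side critical section as an acquire/release pair on
 * rcu_lock_map, so an imbalance such as
 *
 *	rcu_read_lock();
 *	return p->field;	(missing rcu_read_unlock())
 *
 * is flagged by lockdep diagnostics instead of surfacing later as a
 * silent use-after-free.
 */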

#define __rcu_read_lock() \
	do { \
		preempt_disable(); \
		__acquire(RCU); \
		rcu_read_acquire(); \
	} while (0)
#define __rcu_read_unlock() \
	do { \
		rcu_read_release(); \
		__release(RCU); \
		preempt_enable(); \
	} while (0)
#define __rcu_read_lock_bh() \
	do { \
		local_bh_disable(); \
		__acquire(RCU_BH); \
		rcu_read_acquire(); \
	} while (0)
#define __rcu_read_unlock_bh() \
	do { \
		rcu_read_release(); \
		__release(RCU_BH); \
		local_bh_enable(); \
	} while (0)
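/*
 * Usage sketch (illustrative; callers normally use the rcu_read_lock()
 * and rcu_read_unlock() wrappers from linux/rcupdate.h rather than
 * these __-prefixed primitives; "gbl_foo" and do_something() are
 * hypothetical):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);
 *	if (p)
 *		do_something(p->a);
 *	rcu_read_unlock();
 */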

#define __synchronize_sched() synchronize_rcu()

#define call_rcu_sched(head, func) call_rcu(head, func)

extern void __rcu_init(void);
#define rcu_init_sched()	do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);

extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

#define rcu_enter_nohz()	do { } while (0)
#define rcu_exit_nohz()		do { } while (0)

/* A context switch is a grace period for rcuclassic. */
static inline int rcu_blocking_is_gp(void)
{
	return num_online_cpus() == 1;
}
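/*
 * Sketch (illustrative): blocking grace-period primitives such as
 * synchronize_rcu() can use this to short-circuit, because with a
 * single online CPU the act of blocking is itself a quiescent state:
 *
 *	if (rcu_blocking_is_gp())
 *		return;		(grace period is trivially complete)
 *	(otherwise wait for a real grace period)
 */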

#endif /* __LINUX_RCUCLASSIC_H */