/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

/*
 * Registry of all live counters (used under CONFIG_HOTPLUG_CPU): the
 * hotplug callback walks this list to fold a dead CPU's per-cpu deltas
 * back into each counter's central count.  percpu_counters_lock
 * serialises list add/del against that walk.
 */
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

/*
 * debugobjects fixup hook, invoked when a counter is freed while still
 * in the ACTIVE state (i.e. percpu_counter_destroy() was never called).
 * Destroy it on the caller's behalf so the per-cpu storage is not leaked.
 */
static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;	/* fixup was applied */
	default:
		return 0;	/* nothing to fix */
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

/* Register @fbc with debugobjects and mark it live. */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

/* Mark @fbc destroyed and drop its debugobjects tracking state. */
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

P
Peter Zijlstra 已提交
58 59 60 61 62 63 64 65 66 67 68 69 70 71
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

72
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
73
{
74
	s64 count;
75
	s32 *pcount;
76 77 78 79
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
80
	if (count >= batch || count <= -batch) {
81 82 83 84 85 86 87 88 89
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
90
EXPORT_SYMBOL(__percpu_counter_add);
91 92 93 94 95

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
96
s64 __percpu_counter_sum(struct percpu_counter *fbc)
97
{
98
	s64 ret;
99 100 101 102
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
103
	for_each_online_cpu(cpu) {
104
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
105 106 107
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
P
Peter Zijlstra 已提交
108
	return ret;
109
}
P
Peter Zijlstra 已提交
110
EXPORT_SYMBOL(__percpu_counter_sum);
111

112 113
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
114 115
{
	spin_lock_init(&fbc->lock);
116
	lockdep_set_class(&fbc->lock, key);
117 118
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
119 120
	if (!fbc->counters)
		return -ENOMEM;
T
Tejun Heo 已提交
121 122 123

	debug_percpu_counter_activate(fbc);

124
#ifdef CONFIG_HOTPLUG_CPU
125
	INIT_LIST_HEAD(&fbc->list);
126 127 128 129
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
130
	return 0;
131
}
132
EXPORT_SYMBOL(__percpu_counter_init);
133 134 135

/*
 * Tear down @fbc: unlink it from the hotplug list and free its per-cpu
 * storage.  Safe to call on a counter whose init failed, or twice on
 * the same counter: fbc->counters is NULL then and this is a no-op.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;	/* makes repeated destroy calls harmless */
}
EXPORT_SYMBOL(percpu_counter_destroy);

151 152 153 154 155 156 157 158 159 160
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}

161 162 163
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
164
#ifdef CONFIG_HOTPLUG_CPU
165 166 167
	unsigned int cpu;
	struct percpu_counter *fbc;

168
	compute_batch_value();
169 170 171 172 173 174 175
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
176
		unsigned long flags;
177

178
		spin_lock_irqsave(&fbc->lock, flags);
179 180 181
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
182
		spin_unlock_irqrestore(&fbc->lock, flags);
183 184
	}
	mutex_unlock(&percpu_counters_lock);
185
#endif
186 187 188
	return NOTIFY_OK;
}

189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);

216 217
static int __init percpu_counter_startup(void)
{
218
	compute_batch_value();
219 220 221 222
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);