/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

#define BLK_RQ_STAT_BATCH	64

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}

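/*
 * Fold any batched samples into the running mean and sample count, then
 * reset the batch accumulator.
 */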
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}

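/*
 * Record one sample: update min/max and add the value to the current batch,
 * flushing the batch first if its sum would overflow or it would fill up.
 */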
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

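/*
 * Called when a request completes: compute how long the request took and
 * pass that value to blk_throtl_stat_add() and to the matching per-CPU
 * bucket of every active callback.
 */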
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

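/*
 * Timer handler: reset the aggregate buckets, fold each online CPU's
 * per-CPU buckets into them, and hand the result to the callback's
 * timer_fn.
 */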
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

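/*
 * Allocate a callback with @buckets aggregate buckets and a matching set of
 * per-CPU buckets; bucket_fn maps a completed request to a bucket index
 * (negative to skip the request) and timer_fn consumes the aggregated stats.
 */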
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

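/*
 * Attach a callback to a queue: zero its per-CPU buckets for every possible
 * CPU, publish it on the queue's RCU-protected callback list, and turn on
 * stats accounting for the queue.
 */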
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

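/*
 * Detach a callback: remove it from the queue's list, drop the stats flag
 * if nothing else needs accounting, and wait for a pending timer to finish.
 */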
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

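/*
 * Permanently enable stats accounting for a queue, independent of any
 * registered callbacks.
 */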
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}