/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

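/*
 * Samples are first accumulated in a per-bucket batch and only folded into
 * the running mean when the batch fills up (BLK_RQ_STAT_BATCH values) or the
 * batch sum would overflow, so the 64-bit division runs once per batch
 * rather than once per sample.
 */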
#define BLK_RQ_STAT_BATCH	64

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

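/* Bucket requests by data direction: 0 for reads, 1 for writes. */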
unsigned int blk_stat_rq_ddir(const struct request *rq)
{
	return rq_data_dir(rq);
}
EXPORT_SYMBOL_GPL(blk_stat_rq_ddir);

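/* Reset a bucket; min starts at -1ULL so the first sample always lowers it. */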
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}

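/*
 * Fold the pending batch into the bucket's mean and sample count, then
 * clear the batch counters.
 */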
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

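/* Merge src into dst, weighting each mean by its number of samples. */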
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}

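/*
 * Record one sample. The batch is flushed first if adding the value would
 * overflow the batch sum or exceed BLK_RQ_STAT_BATCH entries.
 */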
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

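/*
 * Called when a request completes: compute the time between issue and now
 * and feed it to the per-CPU bucket of every active callback on the queue.
 */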
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			bucket = cb->bucket_fn(rq);
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}

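/*
 * Callback timer handler: collapse the per-CPU buckets into cb->stat, reset
 * the per-CPU copies, and hand the aggregated buckets to the owner's
 * timer_fn.
 */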
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

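/*
 * Allocate a statistics callback: one blk_rq_stat per bucket for the
 * aggregated result plus a per-CPU array of the same size for collection.
 * The callback still has to be attached to a queue and armed by its owner.
 *
 * Rough usage sketch (activation helpers live in blk-stat.h; my_timer_fn and
 * data are placeholders):
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, blk_stat_rq_ddir, 2, data);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */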
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			unsigned int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

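/*
 * Attach a callback to a queue: reset its per-CPU buckets, publish it on the
 * RCU-protected callback list, and enable stats collection for the queue.
 */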
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

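/*
 * Detach a callback from a queue and cancel its timer. Stats collection is
 * switched off once no callbacks remain, unless accounting was enabled
 * permanently via blk_stat_enable_accounting().
 */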
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

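/*
 * Free a callback. The release is deferred through call_rcu() so a
 * concurrent blk_stat_add() still walking the callback list under
 * rcu_read_lock() never touches freed memory.
 */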
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

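/*
 * Keep QUEUE_FLAG_STATS set permanently for this queue, even after the last
 * callback has been removed.
 */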
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}

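/* Allocate the per-queue stats bookkeeping: callback list, lock and flag. */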
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

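/*
 * Free the per-queue stats bookkeeping; all callbacks are expected to have
 * been removed by now.
 */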
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}