/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"

#define BLK_RQ_STAT_BATCH	64

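/*
 * Per-queue stats state: the list of active callbacks. Writers take ->lock;
 * readers walk the list under RCU.
 */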
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
};

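/**
 * blk_stat_rq_ddir() - Bucket callback function for the request data
 * direction.
 * @rq: Request.
 *
 * This is the same as rq_data_dir() but as a function so it can be used as
 * the @bucket_fn for blk_stat_alloc_callback().
 *
 * Return: Data direction of the request, either READ or WRITE.
 */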
unsigned int blk_stat_rq_ddir(const struct request *rq)
{
	return rq_data_dir(rq);
}
EXPORT_SYMBOL_GPL(blk_stat_rq_ddir);

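/* Reset a stat bucket; min starts at -1ULL so the first sample replaces it. */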
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}

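/*
 * Fold the accumulated batch into the running mean as a weighted average,
 * then reset the batch counters. Batching (up to BLK_RQ_STAT_BATCH samples)
 * means the 64-bit division runs once per flush rather than once per sample.
 */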
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

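/*
 * Merge @src into @dst: flush @src's pending batch first, then combine
 * min/max and the sample-count-weighted means.
 */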
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}

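/*
 * Record one sample: update min/max immediately, but accumulate the value
 * into the current batch. Flush first if adding @value would overflow the
 * batch accumulator or the batch is already full.
 */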
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

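/*
 * Called on request completion: the elapsed time since the request's issue
 * timestamp is added to the matching per-CPU bucket of every active callback
 * on the queue. A completion time earlier than the issue time is ignored.
 */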
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			bucket = cb->bucket_fn(rq);
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}

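/*
 * Timer callback: aggregate the per-CPU buckets into cb->stat[], reset the
 * per-CPU buckets, and pass the result to the owner's @timer_fn.
 */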
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

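/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function. Maps a request to a bucket index
 *	in the range [0, @buckets).
 * @buckets: Number of statistics buckets.
 * @data: Value for the @data field of the &struct blk_stat_callback.
 *
 * Return: &struct blk_stat_callback on success or NULL on an allocation
 * failure.
 */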
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			unsigned int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
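/*
 * Example usage (an illustrative sketch, not part of this file): a consumer
 * could bucket completion times by data direction via blk_stat_rq_ddir()
 * (two buckets, READ and WRITE) and read cb->stat[] from its timer
 * callback. "my_timer_fn" and "q" are placeholder names:
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, blk_stat_rq_ddir, 2, NULL);
 *	if (!cb)
 *		return -ENOMEM;
 *	blk_stat_add_callback(q, cb);
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */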

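/**
 * blk_stat_add_callback() - Activate a block statistics callback on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Resets the callback's per-CPU buckets, publishes it on the queue's RCU
 * list, and sets QUEUE_FLAG_STATS so completions start feeding it.
 */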
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

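/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Clears QUEUE_FLAG_STATS when the last callback is removed and stops the
 * callback's timer before returning.
 */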
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks))
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

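/* RCU callback that does the actual freeing once all readers are done. */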
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

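/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback.
 *
 * @cb must already have been removed from its queue(s); freeing is deferred
 * via RCU so that completions still walking the callback list under
 * rcu_read_lock() never touch freed memory.
 */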
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

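/* Allocate the per-queue stats container (callback list and its lock). */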
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);

	return stats;
}

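/* Free the per-queue stats container; all callbacks must be gone by now. */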
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}